
Merge branch 'netbsd' into netbsd-ci

Andreas T Jonsson 1 year ago
parent
commit
021271091a
100 changed files with 5680 additions and 753 deletions
  1. .gitattributes (+5 -1)
  2. .github/workflows/ci.yml (+1 -1)
  3. .github/workflows/nightly.yml (+1 -1)
  4. .gitignore (+4 -0)
  5. base/intrinsics/intrinsics.odin (+3 -3)
  6. base/runtime/core_builtin.odin (+2 -2)
  7. base/runtime/error_checks.odin (+13 -1)
  8. base/runtime/internal.odin (+6 -8)
  9. base/runtime/print.odin (+22 -16)
  10. build_odin.sh (+5 -5)
  11. core/bufio/reader.odin (+4 -4)
  12. core/bufio/writer.odin (+4 -0)
  13. core/bytes/buffer.odin (+1 -1)
  14. core/c/frontend/tokenizer/doc.odin (+26 -29)
  15. core/compress/gzip/example.odin (+1 -1)
  16. core/compress/gzip/gzip.odin (+1 -1)
  17. core/compress/shoco/model.odin (+1 -1)
  18. core/compress/shoco/shoco.odin (+1 -1)
  19. core/compress/zlib/example.odin (+1 -1)
  20. core/compress/zlib/zlib.odin (+1 -1)
  21. core/container/bit_array/bit_array.odin (+1 -1)
  22. core/container/bit_array/doc.odin (+37 -38)
  23. core/crypto/crypto.odin (+3 -6)
  24. core/crypto/rand_bsd.odin (+3 -4)
  25. core/crypto/rand_darwin.odin (+10 -9)
  26. core/crypto/rand_generic.odin (+3 -4)
  27. core/crypto/rand_js.odin (+4 -4)
  28. core/crypto/rand_linux.odin (+4 -4)
  29. core/crypto/rand_windows.odin (+3 -4)
  30. core/debug/trace/doc.odin (+51 -0)
  31. core/debug/trace/trace.odin (+47 -0)
  32. core/debug/trace/trace_cpp.odin (+195 -0)
  33. core/debug/trace/trace_nil.odin (+18 -0)
  34. core/debug/trace/trace_windows.odin (+68 -0)
  35. core/dynlib/doc.odin (+2 -3)
  36. core/dynlib/lib.odin (+1 -1)
  37. core/encoding/base32/base32.odin (+1 -1)
  38. core/encoding/base64/base64.odin (+125 -48)
  39. core/encoding/cbor/cbor.odin (+673 -0)
  40. core/encoding/cbor/coding.odin (+886 -0)
  41. core/encoding/cbor/doc.odin (+141 -0)
  42. core/encoding/cbor/marshal.odin (+575 -0)
  43. core/encoding/cbor/tags.odin (+381 -0)
  44. core/encoding/cbor/unmarshal.odin (+932 -0)
  45. core/encoding/csv/reader.odin (+20 -6)
  46. core/encoding/csv/writer.odin (+5 -2)
  47. core/encoding/entity/entity.odin (+1 -1)
  48. core/encoding/entity/generated.odin (+1 -1)
  49. core/encoding/hex/hex.odin (+1 -1)
  50. core/encoding/json/marshal.odin (+2 -2)
  51. core/encoding/json/parser.odin (+1 -1)
  52. core/encoding/json/tokenizer.odin (+1 -1)
  53. core/encoding/json/types.odin (+1 -1)
  54. core/encoding/json/unmarshal.odin (+2 -2)
  55. core/encoding/json/validator.odin (+1 -1)
  56. core/encoding/varint/doc.odin (+1 -1)
  57. core/encoding/varint/leb128.odin (+1 -1)
  58. core/encoding/xml/debug_print.odin (+1 -1)
  59. core/encoding/xml/helpers.odin (+1 -1)
  60. core/encoding/xml/tokenizer.odin (+1 -1)
  61. core/encoding/xml/xml_reader.odin (+1 -1)
  62. core/fmt/fmt.odin (+49 -19)
  63. core/fmt/fmt_js.odin (+55 -0)
  64. core/image/netpbm/doc.odin (+21 -18)
  65. core/io/io.odin (+24 -1)
  66. core/math/big/prime.odin (+14 -0)
  67. core/math/linalg/glsl/linalg_glsl.odin (+2 -2)
  68. core/math/math.odin (+28 -1)
  69. core/math/rand/rand.odin (+23 -20)
  70. core/math/rand/system_darwin.odin (+0 -22)
  71. core/math/rand/system_js.odin (+0 -14)
  72. core/math/rand/system_linux.odin (+0 -29)
  73. core/math/rand/system_windows.odin (+0 -13)
  74. core/mem/allocators.odin (+1 -1)
  75. core/mem/tracking_allocator.odin (+32 -0)
  76. core/net/socket_linux.odin (+6 -2)
  77. core/net/url.odin (+15 -2)
  78. core/odin/parser/parser.odin (+13 -8)
  79. core/odin/tokenizer/tokenizer.odin (+1 -0)
  80. core/os/os_js.odin (+2 -9)
  81. core/os/stat.odin (+2 -2)
  82. core/reflect/reflect.odin (+21 -0)
  83. core/reflect/types.odin (+61 -0)
  84. core/sync/chan/chan.odin (+2 -0)
  85. core/sync/futex_netbsd.odin (+46 -137)
  86. core/sys/darwin/CoreFoundation/CFBase.odin (+34 -0)
  87. core/sys/darwin/CoreFoundation/CFString.odin (+203 -0)
  88. core/sys/darwin/Foundation/NSApplication.odin (+1 -1)
  89. core/sys/darwin/Foundation/NSString.odin (+6 -9)
  90. core/sys/darwin/Security/SecBase.odin (+386 -0)
  91. core/sys/darwin/Security/SecRandom.odin (+19 -0)
  92. core/sys/darwin/core_foundation.odin (+0 -98)
  93. core/sys/darwin/security.odin (+0 -26)
  94. core/sys/darwin/xnu_system_call_wrappers.odin (+3 -3)
  95. core/sys/info/cpu_arm.odin (+59 -15)
  96. core/sys/info/cpu_darwin_arm64.odin (+98 -0)
  97. core/sys/info/cpu_linux_arm.odin (+65 -0)
  98. core/sys/info/doc.odin (+67 -64)
  99. core/sys/info/platform_bsd.odin (+9 -5)
  100. core/sys/info/platform_darwin.odin (+3 -2)

+ 5 - 1
.gitattributes

@@ -1,2 +1,6 @@
 *.odin linguist-language=Odin
-* text=auto
+* text=auto
+
+# These files must always have *nix line-endings
+Makefile text eol=lf
+*.sh text eol=lf

+ 1 - 1
.github/workflows/ci.yml

@@ -88,7 +88,7 @@ jobs:
         timeout-minutes: 10
   build_macOS:
     name: MacOS Build, Check, and Test
-    runs-on: macos-latest
+    runs-on: macos-13
     steps:
       - uses: actions/checkout@v1
       - name: Download LLVM, and setup PATH

+ 1 - 1
.github/workflows/nightly.yml

@@ -77,7 +77,7 @@ jobs:
   build_macos:
     name: MacOS Build
     if: github.repository == 'odin-lang/Odin'
-    runs-on: macos-latest
+    runs-on: macos-13
     steps:
       - uses: actions/checkout@v1
       - name: Download LLVM and setup PATH

+ 4 - 0
.gitignore

@@ -27,6 +27,8 @@ tests/documentation/all.odin-doc
 tests/internal/test_map
 tests/internal/test_pow
 tests/internal/test_rtti
+tests/core/test_base64
+tests/core/test_cbor
 tests/core/test_core_compress
 tests/core/test_core_container
 tests/core/test_core_filepath
@@ -40,8 +42,10 @@ tests/core/test_core_net
 tests/core/test_core_os_exit
 tests/core/test_core_reflect
 tests/core/test_core_strings
+tests/core/test_core_time
 tests/core/test_crypto
 tests/core/test_hash
+tests/core/test_hex
 tests/core/test_hxa
 tests/core/test_json
 tests/core/test_linalg_glsl_math

+ 3 - 3
base/intrinsics/intrinsics.odin

@@ -38,9 +38,9 @@ count_leading_zeros  :: proc(x: $T) -> T where type_is_integer(T) || type_is_sim
 reverse_bits         :: proc(x: $T) -> T where type_is_integer(T) || type_is_simd_vector(T) ---
 byte_swap            :: proc(x: $T) -> T where type_is_integer(T) || type_is_float(T) ---
 
-overflow_add :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
-overflow_sub :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
-overflow_mul :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
+overflow_add :: proc(lhs, rhs: $T) -> (T, bool) ---
+overflow_sub :: proc(lhs, rhs: $T) -> (T, bool) ---
+overflow_mul :: proc(lhs, rhs: $T) -> (T, bool) ---
 
 sqrt :: proc(x: $T) -> T where type_is_float(T) || (type_is_simd_vector(T) && type_is_float(type_elem_type(T))) ---
 

+ 2 - 2
base/runtime/core_builtin.odin

@@ -40,7 +40,7 @@ copy_slice :: proc "contextless" (dst, src: $T/[]$E) -> int {
 	}
 	return n
 }
-// `copy_from_string` is a built-in procedure that copies elements from a source slice `src` to a destination string `dst`.
+// `copy_from_string` is a built-in procedure that copies elements from a source string `src` to a destination slice `dst`.
 // The source and destination may overlap. Copy returns the number of elements copied, which will be the minimum
 // of len(src) and len(dst).
 //
@@ -53,7 +53,7 @@ copy_from_string :: proc "contextless" (dst: $T/[]$E/u8, src: $S/string) -> int
 	}
 	return n
 }
-// `copy` is a built-in procedure that copies elements from a source slice `src` to a destination slice/string `dst`.
+// `copy` is a built-in procedure that copies elements from a source slice/string `src` to a destination slice `dst`.
 // The source and destination may overlap. Copy returns the number of elements copied, which will be the minimum
 // of len(src) and len(dst).
 @builtin
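
A minimal usage sketch of the two builtins documented above (the procedure and values are illustrative, not part of the commit):

	example :: proc() {
		buf: [5]u8
		n := copy_from_string(buf[:], "hello") // n == 5, the minimum of the two lengths
		m := copy(buf[:4], buf[1:])            // m == 4; overlapping regions are allowed
		_, _ = n, m
	}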

+ 13 - 1
base/runtime/error_checks.odin

@@ -19,6 +19,7 @@ type_assertion_trap :: proc "contextless" () -> ! {
 }
 
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 bounds_check_error :: proc "contextless" (file: string, line, column: i32, index, count: int) {
 	if uint(index) < uint(count) {
 		return
@@ -61,6 +62,7 @@ multi_pointer_slice_handle_error :: proc "contextless" (file: string, line, colu
 }
 
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 multi_pointer_slice_expr_error :: proc "contextless" (file: string, line, column: i32, lo, hi: int) {
 	if lo <= hi {
 		return
@@ -68,6 +70,7 @@ multi_pointer_slice_expr_error :: proc "contextless" (file: string, line, column
 	multi_pointer_slice_handle_error(file, line, column, lo, hi)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 slice_expr_error_hi :: proc "contextless" (file: string, line, column: i32, hi: int, len: int) {
 	if 0 <= hi && hi <= len {
 		return
@@ -75,6 +78,7 @@ slice_expr_error_hi :: proc "contextless" (file: string, line, column: i32, hi:
 	slice_handle_error(file, line, column, 0, hi, len)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 slice_expr_error_lo_hi :: proc "contextless" (file: string, line, column: i32, lo, hi: int, len: int) {
 	if 0 <= lo && lo <= len && lo <= hi && hi <= len {
 		return
@@ -82,6 +86,7 @@ slice_expr_error_lo_hi :: proc "contextless" (file: string, line, column: i32, l
 	slice_handle_error(file, line, column, lo, hi, len)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 dynamic_array_expr_error :: proc "contextless" (file: string, line, column: i32, low, high, max: int) {
 	if 0 <= low && low <= high && high <= max {
 		return
@@ -102,6 +107,7 @@ dynamic_array_expr_error :: proc "contextless" (file: string, line, column: i32,
 }
 
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 matrix_bounds_check_error :: proc "contextless" (file: string, line, column: i32, row_index, column_index, row_count, column_count: int) {
 	if uint(row_index) < uint(row_count) &&
 	   uint(column_index) < uint(column_count) {
@@ -224,6 +230,7 @@ when ODIN_NO_RTTI {
 }
 
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 make_slice_error_loc :: #force_inline proc "contextless" (loc := #caller_location, len: int) {
 	if 0 <= len {
 		return
@@ -239,6 +246,7 @@ make_slice_error_loc :: #force_inline proc "contextless" (loc := #caller_locatio
 	handle_error(loc, len)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 make_dynamic_array_error_loc :: #force_inline proc "contextless" (loc := #caller_location, len, cap: int) {
 	if 0 <= len && len <= cap {
 		return
@@ -256,6 +264,7 @@ make_dynamic_array_error_loc :: #force_inline proc "contextless" (loc := #caller
 	handle_error(loc, len, cap)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 make_map_expr_error_loc :: #force_inline proc "contextless" (loc := #caller_location, cap: int) {
 	if 0 <= cap {
 		return
@@ -274,19 +283,22 @@ make_map_expr_error_loc :: #force_inline proc "contextless" (loc := #caller_loca
 
 
 
-
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 bounds_check_error_loc :: #force_inline proc "contextless" (loc := #caller_location, index, count: int) {
 	bounds_check_error(loc.file_path, loc.line, loc.column, index, count)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 slice_expr_error_hi_loc :: #force_inline proc "contextless" (loc := #caller_location, hi: int, len: int) {
 	slice_expr_error_hi(loc.file_path, loc.line, loc.column, hi, len)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 slice_expr_error_lo_hi_loc :: #force_inline proc "contextless" (loc := #caller_location, lo, hi: int, len: int) {
 	slice_expr_error_lo_hi(loc.file_path, loc.line, loc.column, lo, hi, len)
 }
 
+@(disabled=ODIN_NO_BOUNDS_CHECK)
 dynamic_array_expr_error_loc :: #force_inline proc "contextless" (loc := #caller_location, low, high, max: int) {
 	dynamic_array_expr_error(loc.file_path, loc.line, loc.column, low, high, max)
 }
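
For context, `@(disabled=...)` removes every call to the annotated procedure when its compile-time condition is true, so these checks vanish entirely when bounds checking is turned off. A minimal sketch (the procedure is illustrative, not part of the commit):

	@(disabled=ODIN_NO_BOUNDS_CHECK)
	report :: proc "contextless" (msg: string) {
		print_string(msg)
	}
	// When ODIN_NO_BOUNDS_CHECK is true (e.g. `-no-bounds-check`), every `report(...)` call compiles to nothing.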

+ 6 - 8
base/runtime/internal.odin

@@ -1042,19 +1042,17 @@ fixdfti :: proc(a: u64) -> i128 {
 __write_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uintptr) {
 	for i in 0..<size {
 		j := offset+i
-		the_bit := byte((src[i/8]) & (1<<(i&7)) != 0)
-		b := the_bit<<(j&7)
-		dst[j/8] &~= b
-		dst[j/8] |=  b
+		the_bit := byte((src[i>>3]) & (1<<(i&7)) != 0)
+		dst[j>>3] &~=       1<<(j&7)
+		dst[j>>3]  |= the_bit<<(j&7)
 	}
 }
 
 __read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uintptr) {
 	for j in 0..<size {
 		i := offset+j
-		the_bit := byte((src[i/8]) & (1<<(i&7)) != 0)
-		b := the_bit<<(j&7)
-		dst[j/8] &~= b
-		dst[j/8] |=  b
+		the_bit := byte((src[i>>3]) & (1<<(i&7)) != 0)
+		dst[j>>3] &~=       1<<(j&7)
+		dst[j>>3]  |= the_bit<<(j&7)
 	}
 }

+ 22 - 16
base/runtime/print.odin

@@ -6,7 +6,7 @@ _INTEGER_DIGITS :: "0123456789abcdefghijklmnopqrstuvwxyz"
 _INTEGER_DIGITS_VAR := _INTEGER_DIGITS
 
 when !ODIN_NO_RTTI {
-	print_any_single :: proc "contextless" (arg: any) {
+	print_any_single :: #force_no_inline proc "contextless" (arg: any) {
 		x := arg
 		if x.data == nil {
 			print_string("nil")
@@ -72,7 +72,7 @@ when !ODIN_NO_RTTI {
 			print_string("<invalid-value>")
 		}
 	}
-	println_any :: proc "contextless" (args: ..any) {
+	println_any :: #force_no_inline proc "contextless" (args: ..any) {
 		context = default_context()
 		loop: for arg, i in args {
 			assert(arg.id != nil)
@@ -122,12 +122,12 @@ encode_rune :: proc "contextless" (c: rune) -> ([4]u8, int) {
 	return buf, 4
 }
 
-print_string :: proc "contextless" (str: string) -> (n: int) {
+print_string :: #force_no_inline proc "contextless" (str: string) -> (n: int) {
 	n, _ = stderr_write(transmute([]byte)str)
 	return
 }
 
-print_strings :: proc "contextless" (args: ..string) -> (n: int) {
+print_strings :: #force_no_inline proc "contextless" (args: ..string) -> (n: int) {
 	for str in args {
 		m, err := stderr_write(transmute([]byte)str)
 		n += m
@@ -138,12 +138,12 @@ print_strings :: proc "contextless" (args: ..string) -> (n: int) {
 	return
 }
 
-print_byte :: proc "contextless" (b: byte) -> (n: int) {
+print_byte :: #force_no_inline proc "contextless" (b: byte) -> (n: int) {
 	n, _ = stderr_write([]byte{b})
 	return
 }
 
-print_encoded_rune :: proc "contextless" (r: rune) {
+print_encoded_rune :: #force_no_inline proc "contextless" (r: rune) {
 	print_byte('\'')
 
 	switch r {
@@ -170,7 +170,7 @@ print_encoded_rune :: proc "contextless" (r: rune) {
 	print_byte('\'')
 }
 
-print_rune :: proc "contextless" (r: rune) -> int #no_bounds_check {
+print_rune :: #force_no_inline proc "contextless" (r: rune) -> int #no_bounds_check {
 	RUNE_SELF :: 0x80
 
 	if r < RUNE_SELF {
@@ -183,7 +183,7 @@ print_rune :: proc "contextless" (r: rune) -> int #no_bounds_check {
 }
 
 
-print_u64 :: proc "contextless" (x: u64) #no_bounds_check {
+print_u64 :: #force_no_inline proc "contextless" (x: u64) #no_bounds_check {
 	a: [129]byte
 	i := len(a)
 	b := u64(10)
@@ -198,7 +198,7 @@ print_u64 :: proc "contextless" (x: u64) #no_bounds_check {
 }
 
 
-print_i64 :: proc "contextless" (x: i64) #no_bounds_check {
+print_i64 :: #force_no_inline proc "contextless" (x: i64) #no_bounds_check {
 	b :: i64(10)
 
 	u := x
@@ -223,25 +223,29 @@ print_uint    :: proc "contextless" (x: uint)    { print_u64(u64(x)) }
 print_uintptr :: proc "contextless" (x: uintptr) { print_u64(u64(x)) }
 print_int     :: proc "contextless" (x: int)     { print_i64(i64(x)) }
 
-print_caller_location :: proc "contextless" (loc: Source_Code_Location) {
+print_caller_location :: #force_no_inline proc "contextless" (loc: Source_Code_Location) {
 	print_string(loc.file_path)
 	when ODIN_ERROR_POS_STYLE == .Default {
 		print_byte('(')
 		print_u64(u64(loc.line))
-		print_byte(':')
-		print_u64(u64(loc.column))
+		if loc.column != 0 {
+			print_byte(':')
+			print_u64(u64(loc.column))
+		}
 		print_byte(')')
 	} else when ODIN_ERROR_POS_STYLE == .Unix {
 		print_byte(':')
 		print_u64(u64(loc.line))
-		print_byte(':')
-		print_u64(u64(loc.column))
+		if loc.column != 0 {
+			print_byte(':')
+			print_u64(u64(loc.column))
+		}
 		print_byte(':')
 	} else {
 		#panic("unhandled ODIN_ERROR_POS_STYLE")
 	}
 }
-print_typeid :: proc "contextless" (id: typeid) {
+print_typeid :: #force_no_inline proc "contextless" (id: typeid) {
 	when ODIN_NO_RTTI {
 		if id == nil {
 			print_string("nil")
@@ -257,7 +261,9 @@ print_typeid :: proc "contextless" (id: typeid) {
 		}
 	}
 }
-print_type :: proc "contextless" (ti: ^Type_Info) {
+
+@(optimization_mode="size")
+print_type :: #force_no_inline proc "contextless" (ti: ^Type_Info) {
 	if ti == nil {
 		print_string("nil")
 		return

+ 5 - 5
build_odin.sh

@@ -32,11 +32,11 @@ if [ -z "$LLVM_CONFIG" ]; then
 	elif [ -n "$(command -v llvm-config-12)" ]; then LLVM_CONFIG="llvm-config-12"
 	elif [ -n "$(command -v llvm-config-11)" ]; then LLVM_CONFIG="llvm-config-11"
 	# freebsd
-	elif [ -n "$(command -v llvm-config17)" ]; then  LLVM_CONFIG="llvm-config-17"
-	elif [ -n "$(command -v llvm-config14)" ]; then  LLVM_CONFIG="llvm-config-14"
-	elif [ -n "$(command -v llvm-config13)" ]; then  LLVM_CONFIG="llvm-config-13"
-	elif [ -n "$(command -v llvm-config12)" ]; then  LLVM_CONFIG="llvm-config-12"
-	elif [ -n "$(command -v llvm-config11)" ]; then  LLVM_CONFIG="llvm-config-11"
+	elif [ -n "$(command -v llvm-config17)" ]; then  LLVM_CONFIG="llvm-config17"
+	elif [ -n "$(command -v llvm-config14)" ]; then  LLVM_CONFIG="llvm-config14"
+	elif [ -n "$(command -v llvm-config13)" ]; then  LLVM_CONFIG="llvm-config13"
+	elif [ -n "$(command -v llvm-config12)" ]; then  LLVM_CONFIG="llvm-config12"
+	elif [ -n "$(command -v llvm-config11)" ]; then  LLVM_CONFIG="llvm-config11"
 	# fallback
 	elif [ -n "$(command -v llvm-config)" ]; then LLVM_CONFIG="llvm-config"
 	else

+ 4 - 4
core/bufio/reader.odin

@@ -81,7 +81,7 @@ _reader_read_new_chunk :: proc(b: ^Reader) -> io.Error {
 	for i := b.max_consecutive_empty_reads; i > 0; i -= 1 {
 		n, err := io.read(b.rd, b.buf[b.w:])
 		if n < 0 {
-			return .Negative_Read
+			return err if err != nil else .Negative_Read
 		}
 		b.w += n
 		if err != nil {
@@ -189,7 +189,7 @@ reader_read :: proc(b: ^Reader, p: []byte) -> (n: int, err: io.Error) {
 		if len(p) >= len(b.buf) {
 			n, b.err = io.read(b.rd, p)
 			if n < 0 {
-				return 0, .Negative_Read
+				return 0, b.err if b.err != nil else .Negative_Read
 			}
 
 			if n > 0 {
@@ -202,7 +202,7 @@ reader_read :: proc(b: ^Reader, p: []byte) -> (n: int, err: io.Error) {
 		b.r, b.w = 0, 0
 		n, b.err = io.read(b.rd, b.buf)
 		if n < 0 {
-			return 0, .Negative_Read
+			return 0, b.err if b.err != nil else .Negative_Read
 		}
 		if n == 0 {
 			return 0, _reader_consume_err(b)
@@ -290,7 +290,7 @@ reader_write_to :: proc(b: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
 	write_buf :: proc(b: ^Reader, w: io.Writer) -> (i64, io.Error) {
 		n, err := io.write(w, b.buf[b.r:b.w])
 		if n < 0 {
-			return 0, .Negative_Write
+			return 0, err if err != nil else .Negative_Write
 		}
 		b.r += n
 		return i64(n), err

+ 4 - 0
core/bufio/writer.odin

@@ -95,6 +95,10 @@ writer_write :: proc(b: ^Writer, p: []byte) -> (n: int, err: io.Error) {
 		m: int
 		if writer_buffered(b) == 0 {
 			m, b.err = io.write(b.wr, p)
+			if m < 0 && b.err == nil {
+				b.err = .Negative_Write
+				break
+			}
 		} else {
 			m = copy(b.buf[b.n:], p)
 			b.n += m

+ 1 - 1
core/bytes/buffer.odin

@@ -359,7 +359,7 @@ buffer_read_from :: proc(b: ^Buffer, r: io.Reader) -> (n: i64, err: io.Error) #n
 		resize(&b.buf, i)
 		m, e := io.read(r, b.buf[i:cap(b.buf)])
 		if m < 0 {
-			err = .Negative_Read
+			err = e if e != nil else .Negative_Read
 			return
 		}
 

+ 26 - 29
core/c/frontend/tokenizer/doc.odin

@@ -1,34 +1,31 @@
 /*
-package demo
-
-import tokenizer "core:c/frontend/tokenizer"
-import preprocessor "core:c/frontend/preprocessor"
-import "core:fmt"
-
-main :: proc() {
-	t := &tokenizer.Tokenizer{};
-	tokenizer.init_defaults(t);
-
-	cpp := &preprocessor.Preprocessor{};
-	cpp.warn, cpp.err = t.warn, t.err;
-	preprocessor.init_lookup_tables(cpp);
-	preprocessor.init_default_macros(cpp);
-	cpp.include_paths = {"my/path/to/include"};
-
-	tok := tokenizer.tokenize_file(t, "the/source/file.c", 1);
-
-	tok = preprocessor.preprocess(cpp, tok);
-	if tok != nil {
-		for t := tok; t.kind != .EOF; t = t.next {
-			fmt.println(t.lit);
+Example:
+	package demo
+
+	import tokenizer "core:c/frontend/tokenizer"
+	import preprocessor "core:c/frontend/preprocessor"
+	import "core:fmt"
+
+	main :: proc() {
+		t := &tokenizer.Tokenizer{};
+		tokenizer.init_defaults(t);
+
+		cpp := &preprocessor.Preprocessor{};
+		cpp.warn, cpp.err = t.warn, t.err;
+		preprocessor.init_lookup_tables(cpp);
+		preprocessor.init_default_macros(cpp);
+		cpp.include_paths = {"my/path/to/include"};
+
+		tok := tokenizer.tokenize_file(t, "the/source/file.c", 1);
+
+		tok = preprocessor.preprocess(cpp, tok);
+		if tok != nil {
+			for t := tok; t.kind != .EOF; t = t.next {
+				fmt.println(t.lit);
+			}
 		}
-	}
 
-	fmt.println("[Done]");
-}
+		fmt.println("[Done]");
+	}
 */
-
-
 package c_frontend_tokenizer
-
-

+ 1 - 1
core/compress/gzip/example.odin

@@ -1,5 +1,5 @@
 //+build ignore
-package gzip
+package compress_gzip
 
 /*
 	Copyright 2021 Jeroen van Rijn <[email protected]>.

+ 1 - 1
core/compress/gzip/gzip.odin

@@ -1,4 +1,4 @@
-package gzip
+package compress_gzip
 
 /*
 	Copyright 2021 Jeroen van Rijn <[email protected]>.

+ 1 - 1
core/compress/shoco/model.odin

@@ -5,7 +5,7 @@
 */
 
 // package shoco is an implementation of the shoco short string compressor
-package shoco
+package compress_shoco
 
 DEFAULT_MODEL :: Shoco_Model {
 	min_char = 39,

+ 1 - 1
core/compress/shoco/shoco.odin

@@ -9,7 +9,7 @@
 */
 
 // package shoco is an implementation of the shoco short string compressor
-package shoco
+package compress_shoco
 
 import "base:intrinsics"
 import "core:compress"

+ 1 - 1
core/compress/zlib/example.odin

@@ -1,5 +1,5 @@
 //+build ignore
-package zlib
+package compress_zlib
 
 /*
 	Copyright 2021 Jeroen van Rijn <[email protected]>.

+ 1 - 1
core/compress/zlib/zlib.odin

@@ -1,5 +1,5 @@
 //+vet !using-param
-package zlib
+package compress_zlib
 
 /*
 	Copyright 2021 Jeroen van Rijn <[email protected]>.

+ 1 - 1
core/container/bit_array/bit_array.odin

@@ -1,4 +1,4 @@
-package dynamic_bit_array
+package container_dynamic_bit_array
 
 import "base:intrinsics"
 import "core:mem"

+ 37 - 38
core/container/bit_array/doc.odin

@@ -1,53 +1,52 @@
-package dynamic_bit_array
-
 /*
-	The Bit Array can be used in several ways:
+The Bit Array can be used in several ways:
 
-	-- By default you don't need to instantiate a Bit Array:
+- By default you don't need to instantiate a Bit Array:
 
-		package test
+	package test
 
-		import "core:fmt"
-		import "core:container/bit_array"
+	import "core:fmt"
+	import "core:container/bit_array"
 
-		main :: proc() {
-			using bit_array
+	main :: proc() {
+		using bit_array
 
-			bits: Bit_Array
+		bits: Bit_Array
 
-			// returns `true`
-			fmt.println(set(&bits, 42))
+		// returns `true`
+		fmt.println(set(&bits, 42))
 
-			// returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
-			was_set, was_retrieved := get(&bits, -1)
-			fmt.println(was_set, was_retrieved) 
-			destroy(&bits)
-		}
+		// returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+		was_set, was_retrieved := get(&bits, -1)
+		fmt.println(was_set, was_retrieved) 
+		destroy(&bits)
+	}
 
-	-- A Bit Array can optionally allow for negative indices, if the mininum value was given during creation:
+- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
 
-		package test
+	package test
 
-		import "core:fmt"
-		import "core:container/bit_array"
+	import "core:fmt"
+	import "core:container/bit_array"
 
-		main :: proc() {
-			Foo :: enum int {
-				Negative_Test = -42,
-				Bar           = 420,
-				Leaves        = 69105,
-			}
+	main :: proc() {
+		Foo :: enum int {
+			Negative_Test = -42,
+			Bar           = 420,
+			Leaves        = 69105,
+		}
 
-			using bit_array
+		using bit_array
 
-			bits := create(int(max(Foo)), int(min(Foo)))
-			defer destroy(bits)
+		bits := create(int(max(Foo)), int(min(Foo)))
+		defer destroy(bits)
 
-			fmt.printf("Set(Bar):           %v\n",     set(bits, Foo.Bar))
-			fmt.printf("Get(Bar):           %v, %v\n", get(bits, Foo.Bar))
-			fmt.printf("Set(Negative_Test): %v\n",     set(bits, Foo.Negative_Test))
-			fmt.printf("Get(Leaves):        %v, %v\n", get(bits, Foo.Leaves))
-			fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
-			fmt.printf("Freed.\n")
-		}
-*/
+		fmt.printf("Set(Bar):           %v\n",     set(bits, Foo.Bar))
+		fmt.printf("Get(Bar):           %v, %v\n", get(bits, Foo.Bar))
+		fmt.printf("Set(Negative_Test): %v\n",     set(bits, Foo.Negative_Test))
+		fmt.printf("Get(Leaves):        %v, %v\n", get(bits, Foo.Leaves))
+		fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
+		fmt.printf("Freed.\n")
+	}
+*/
+package container_dynamic_bit_array

+ 3 - 6
core/crypto/crypto.odin

@@ -49,15 +49,12 @@ compare_byte_ptrs_constant_time :: proc "contextless" (a, b: ^byte, n: int) -> i
 // the system entropy source.  This routine will block if the system entropy
 // source is not ready yet.  All system entropy source failures are treated
 // as catastrophic, resulting in a panic.
+//
+// Support for the system entropy source can be checked with the
+// `HAS_RAND_BYTES` boolean constant.
 rand_bytes :: proc (dst: []byte) {
 	// zero-fill the buffer first
 	mem.zero_explicit(raw_data(dst), len(dst))
 
 	_rand_bytes(dst)
 }
-
-// has_rand_bytes returns true iff the target has support for accessing the
-// system entropty source.
-has_rand_bytes :: proc () -> bool {
-	return _has_rand_bytes()
-}
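
A minimal usage sketch of the constant that replaces `has_rand_bytes` (the wrapper procedure is illustrative, not part of the commit):

	import "core:crypto"

	fill_random :: proc(buf: []byte) {
		when crypto.HAS_RAND_BYTES {
			crypto.rand_bytes(buf)
		}
	}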

+ 3 - 4
core/crypto/rand_bsd.odin

@@ -3,14 +3,13 @@ package crypto
 
 foreign import libc "system:c"
 
+HAS_RAND_BYTES :: true
+
 foreign libc {
 	arc4random_buf :: proc(buf: [^]byte, nbytes: uint) ---
 }
 
+@(private)
 _rand_bytes :: proc(dst: []byte) {
 	arc4random_buf(raw_data(dst), len(dst))
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return true
-}

+ 10 - 9
core/crypto/rand_darwin.odin

@@ -1,16 +1,17 @@
 package crypto
 
 import "core:fmt"
-import "core:sys/darwin"
 
+import CF "core:sys/darwin/CoreFoundation"
+import Sec "core:sys/darwin/Security"
+
+HAS_RAND_BYTES :: true
+
+@(private)
 _rand_bytes :: proc(dst: []byte) {
-	res := darwin.SecRandomCopyBytes(count=len(dst), bytes=raw_data(dst))
-	if res != .Success {
-		msg := darwin.CFStringCopyToOdinString(darwin.SecCopyErrorMessageString(res))
-		panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", res, msg))
+	err := Sec.RandomCopyBytes(count=len(dst), bytes=raw_data(dst))
+	if err != .Success {
+        msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
+        panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg))
 	}
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return true
-}

+ 3 - 4
core/crypto/rand_generic.odin

@@ -7,10 +7,9 @@
 //+build !js
 package crypto
 
+HAS_RAND_BYTES :: false
+
+@(private)
 _rand_bytes :: proc(dst: []byte) {
 	unimplemented("crypto: rand_bytes not supported on this OS")
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return false
-}

+ 4 - 4
core/crypto/rand_js.odin

@@ -6,8 +6,12 @@ foreign odin_env {
 	env_rand_bytes :: proc "contextless" (buf: []byte) ---
 }
 
+HAS_RAND_BYTES :: true
+
+@(private)
 _MAX_PER_CALL_BYTES :: 65536 // 64kiB
 
+@(private)
 _rand_bytes :: proc(dst: []byte) {
 	dst := dst
 
@@ -18,7 +22,3 @@ _rand_bytes :: proc(dst: []byte) {
 		dst = dst[to_read:]
 	}
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return true
-}

+ 4 - 4
core/crypto/rand_linux.odin

@@ -4,8 +4,12 @@ import "core:fmt"
 
 import "core:sys/linux"
 
+HAS_RAND_BYTES :: true
+
+@(private)
 _MAX_PER_CALL_BYTES :: 33554431 // 2^25 - 1
 
+@(private)
 _rand_bytes :: proc (dst: []byte) {
 	dst := dst
 	l := len(dst)
@@ -34,7 +38,3 @@ _rand_bytes :: proc (dst: []byte) {
 		dst = dst[n_read:]
 	}
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return true
-}

+ 3 - 4
core/crypto/rand_windows.odin

@@ -4,6 +4,9 @@ import win32 "core:sys/windows"
 import "core:os"
 import "core:fmt"
 
+HAS_RAND_BYTES :: true
+
+@(private)
 _rand_bytes :: proc(dst: []byte) {
 	ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
 	if ret != os.ERROR_NONE {
@@ -21,7 +24,3 @@ _rand_bytes :: proc(dst: []byte) {
 		}
 	}
 }
-
-_has_rand_bytes :: proc () -> bool {
-	return true
-}

+ 51 - 0
core/debug/trace/doc.odin

@@ -0,0 +1,51 @@
+/*
+A debug stack trace library. Only works when debug symbols are enabled (compile with `-debug`).
+
+Example:
+	import "base:runtime"
+	import "core:debug/trace"
+
+	import "core:fmt"
+
+	global_trace_ctx: trace.Context
+
+	debug_trace_assertion_failure_proc :: proc(prefix, message: string, loc := #caller_location) -> ! {
+		runtime.print_caller_location(loc)
+		runtime.print_string(" ")
+		runtime.print_string(prefix)
+		if len(message) > 0 {
+			runtime.print_string(": ")
+			runtime.print_string(message)
+		}
+		runtime.print_byte('\n')
+
+		ctx := &global_trace_ctx
+		if !trace.in_resolve(ctx) {
+			buf: [64]trace.Frame
+			runtime.print_string("Debug Trace:\n")
+			frames := trace.frames(ctx, 1, buf[:])
+			for f, i in frames {
+				fl := trace.resolve(ctx, f, context.temp_allocator)
+				if fl.loc.file_path == "" && fl.loc.line == 0 {
+					continue
+				}
+				runtime.print_caller_location(fl.loc)
+				runtime.print_string(" - frame ")
+				runtime.print_int(i)
+				runtime.print_byte('\n')
+			}
+		}
+		runtime.trap()
+	}
+
+	main :: proc() {
+		trace.init(&global_trace_ctx)
+		defer trace.destroy(&global_trace_ctx)
+
+		context.assertion_failure_proc = debug_trace_assertion_failure_proc
+
+		...
+	}
+
+*/
+package debug_trace

+ 47 - 0
core/debug/trace/trace.odin

@@ -0,0 +1,47 @@
+package debug_trace
+
+import "base:intrinsics"
+import "base:runtime"
+
+Frame :: distinct uintptr
+
+Frame_Location :: struct {
+	using loc: runtime.Source_Code_Location,
+	allocator: runtime.Allocator,
+}
+
+delete_frame_location :: proc(fl: Frame_Location) -> runtime.Allocator_Error {
+	allocator := fl.allocator
+	delete(fl.loc.procedure, allocator) or_return
+	delete(fl.loc.file_path, allocator) or_return
+	return nil
+}
+
+Context :: struct {
+	in_resolve: bool, // atomic
+	impl: _Context,
+}
+
+init :: proc(ctx: ^Context) -> bool {
+	return _init(ctx)
+}
+
+destroy :: proc(ctx: ^Context) -> bool {
+	return _destroy(ctx)
+}
+
+@(require_results)
+frames :: proc(ctx: ^Context, skip: uint, frames_buffer: []Frame) -> []Frame {
+	return _frames(ctx, skip, frames_buffer)
+}
+
+@(require_results)
+resolve :: proc(ctx: ^Context, frame: Frame, allocator: runtime.Allocator) -> (result: Frame_Location) {
+	return _resolve(ctx, frame, allocator)
+}
+
+
+@(require_results)
+in_resolve :: proc "contextless" (ctx: ^Context) -> bool {
+	return intrinsics.atomic_load(&ctx.in_resolve)
+}

+ 195 - 0
core/debug/trace/trace_cpp.odin

@@ -0,0 +1,195 @@
+//+private file
+//+build linux, darwin
+package debug_trace
+
+import "base:intrinsics"
+import "base:runtime"
+import "core:strings"
+import "core:fmt"
+import "core:c"
+
+// NOTE: Relies on C++23, which adds <stacktrace> to the standard library ABI, so it can be linked and used here
+foreign import stdcpplibbacktrace "system:stdc++_libbacktrace"
+
+foreign import libdl "system:dl"
+
+backtrace_state :: struct {}
+backtrace_error_callback   :: proc "c" (data: rawptr, msg: cstring, errnum: c.int)
+backtrace_simple_callback  :: proc "c" (data: rawptr, pc: uintptr) -> c.int
+backtrace_full_callback    :: proc "c" (data: rawptr, pc: uintptr, filename: cstring, lineno: c.int, function: cstring) -> c.int
+backtrace_syminfo_callback :: proc "c" (data: rawptr, pc: uintptr, symname: cstring, symval: uintptr, symsize: uintptr)
+
+@(default_calling_convention="c", link_prefix="__glibcxx_")
+foreign stdcpplibbacktrace {
+	backtrace_create_state :: proc(
+		filename:       cstring,
+		threaded:       c.int,
+		error_callback: backtrace_error_callback,
+		data:           rawptr,
+	) -> ^backtrace_state ---
+	backtrace_simple  :: proc(
+		state:          ^backtrace_state,
+		skip:           c.int,
+		callback:       backtrace_simple_callback,
+		error_callback: backtrace_error_callback,
+		data:           rawptr,
+	) -> c.int ---
+	backtrace_pcinfo  :: proc(
+		state:          ^backtrace_state,
+		pc:             uintptr,
+		callback:       backtrace_full_callback,
+		error_callback: backtrace_error_callback,
+		data:           rawptr,
+	) -> c.int ---
+	backtrace_syminfo :: proc(
+		state:          ^backtrace_state,
+		addr:           uintptr,
+		callback:       backtrace_syminfo_callback,
+		error_callback: backtrace_error_callback,
+		data:           rawptr,
+	) -> c.int ---
+
+	// NOTE(bill): this is technically an internal procedure, but it is exposed
+	backtrace_free    :: proc(
+		state: ^backtrace_state,
+		p:              rawptr,
+		size:           c.size_t,                 // unused
+		error_callback: backtrace_error_callback, // unused
+		data:           rawptr,                   // unused
+		) ---
+}
+
+Dl_info :: struct {
+	dli_fname: cstring,
+	dli_fbase: rawptr,
+	dli_sname: cstring,
+	dli_saddr: rawptr,
+}
+
+@(default_calling_convention="c")
+foreign libdl {
+	dladdr :: proc(addr: rawptr, info: ^Dl_info) -> c.int ---
+}
+
+@(private="package")
+_Context :: struct {
+	state: ^backtrace_state,
+}
+
+@(private="package")
+_init :: proc(ctx: ^Context) -> (ok: bool) {
+	defer if !ok do destroy(ctx)
+
+	ctx.impl.state = backtrace_create_state("odin-debug-trace", 1, nil, ctx)
+	return ctx.impl.state != nil
+}
+
+@(private="package")
+_destroy :: proc(ctx: ^Context) -> bool {
+	if ctx != nil {
+		backtrace_free(ctx.impl.state, nil, 0, nil, nil)
+	}
+	return true
+}
+
+@(private="package")
+_frames :: proc "contextless" (ctx: ^Context, skip: uint, frames_buffer: []Frame) -> (frames: []Frame) {
+	Backtrace_Context :: struct {
+		ctx:         ^Context,
+		frames:      []Frame,
+		frame_count: int,
+	}
+
+	btc := &Backtrace_Context{
+		ctx = ctx,
+		frames = frames_buffer,
+	}
+	backtrace_simple(
+		ctx.impl.state,
+		c.int(skip + 2),
+		proc "c" (user: rawptr, address: uintptr) -> c.int {
+			btc := (^Backtrace_Context)(user)
+			address := Frame(address)
+			if address == 0 {
+				return 1
+			}
+			if btc.frame_count == len(btc.frames) {
+				return 1
+			}
+			btc.frames[btc.frame_count] = address
+			btc.frame_count += 1
+			return 0
+		},
+		nil,
+		btc,
+	)
+
+	if btc.frame_count > 0 {
+		frames = btc.frames[:btc.frame_count]
+	}
+	return
+}
+
+@(private="package")
+_resolve :: proc(ctx: ^Context, frame: Frame, allocator: runtime.Allocator) -> Frame_Location {
+	intrinsics.atomic_store(&ctx.in_resolve, true)
+	defer intrinsics.atomic_store(&ctx.in_resolve, false)
+
+	Backtrace_Context :: struct {
+		rt_ctx:    runtime.Context,
+		allocator: runtime.Allocator,
+		frame:     Frame_Location,
+	}
+
+	btc := &Backtrace_Context{
+		rt_ctx = context,
+		allocator = allocator,
+	}
+	done := backtrace_pcinfo(
+		ctx.impl.state,
+		uintptr(frame),
+		proc "c" (data: rawptr, address: uintptr, file: cstring, line: c.int, symbol: cstring) -> c.int {
+			btc := (^Backtrace_Context)(data)
+			context = btc.rt_ctx
+
+			frame := &btc.frame
+
+			if file != nil {
+				frame.file_path = strings.clone_from_cstring(file, btc.allocator)
+			} else if info: Dl_info; dladdr(rawptr(address), &info) != 0 && info.dli_fname != "" {
+				frame.file_path = strings.clone_from_cstring(info.dli_fname, btc.allocator)
+			}
+			if symbol != nil {
+				frame.procedure = strings.clone_from_cstring(symbol, btc.allocator)
+			} else if info: Dl_info; dladdr(rawptr(address), &info) != 0 && info.dli_sname != "" {
+				frame.procedure = strings.clone_from_cstring(info.dli_sname, btc.allocator)
+			} else {
+				frame.procedure = fmt.aprintf("(procedure: 0x%x)", address, allocator=btc.allocator)
+			}
+			frame.line = i32(line)
+			return 0
+		},
+		nil,
+		btc,
+	)
+	if done != 0 {
+		return btc.frame
+	}
+
+	// NOTE(bill): pcinfo cannot resolve, but it might be possible to get the procedure name at least
+	backtrace_syminfo(
+		ctx.impl.state,
+		uintptr(frame),
+		proc "c" (data: rawptr, address: uintptr, symbol: cstring, _ignore0, _ignore1: uintptr) {
+			if symbol != nil {
+				btc := (^Backtrace_Context)(data)
+				context = btc.rt_ctx
+				btc.frame.procedure = strings.clone_from_cstring(symbol, btc.allocator)
+			}
+		},
+		nil,
+		btc,
+	)
+
+	return btc.frame
+}

+ 18 - 0
core/debug/trace/trace_nil.odin

@@ -0,0 +1,18 @@
+//+build !windows !linux !darwin
+package debug_trace
+
+import "base:runtime"
+
+_Context :: struct {
+}
+
+_init :: proc(ctx: ^Context) -> (ok: bool) {
+	return true
+}
+_destroy :: proc(ctx: ^Context) -> bool {
+	return true
+}
+_frames :: proc(ctx: ^Context, skip: uint, frames_buffer: []Frame) -> []Frame {
+	return nil
+}
+_resolve :: proc(ctx: ^Context, frame: Frame, allocator: runtime.Allocator) -> (result: Frame_Location) {
+	return
+}

+ 68 - 0
core/debug/trace/trace_windows.odin

@@ -0,0 +1,68 @@
+//+private
+//+build windows
+package debug_trace
+
+import "base:intrinsics"
+import "base:runtime"
+
+import win32 "core:sys/windows"
+import "core:fmt"
+
+_Context :: struct {
+	hProcess: win32.HANDLE,
+	lock:     win32.SRWLOCK,
+}
+
+_init :: proc "contextless" (ctx: ^Context) -> (ok: bool) {
+	defer if !ok { _destroy(ctx) }
+	ctx.impl.hProcess = win32.GetCurrentProcess()
+	win32.SymInitialize(ctx.impl.hProcess, nil, true) or_return
+	win32.SymSetOptions(win32.SYMOPT_LOAD_LINES)
+	return true
+}
+
+_destroy :: proc "contextless" (ctx: ^Context) -> bool {
+	if ctx != nil {
+		win32.SymCleanup(ctx.impl.hProcess)
+	}
+	return true
+}
+
+_frames :: proc "contextless" (ctx: ^Context, skip: uint, frames_buffer: []Frame) -> []Frame {
+	frame_count := win32.RtlCaptureStackBackTrace(u32(skip) + 2, u32(len(frames_buffer)), ([^]rawptr)(&frames_buffer[0]), nil)
+	for i in 0..<frame_count {
+		// NOTE: Return address is one after the call instruction so subtract a byte to
+		// end up back inside the call instruction which is needed for SymFromAddr.
+		frames_buffer[i] -= 1
+	}
+	return frames_buffer[:frame_count]
+}
+
+
+_resolve :: proc(ctx: ^Context, frame: Frame, allocator: runtime.Allocator) -> (fl: Frame_Location) {
+	intrinsics.atomic_store(&ctx.in_resolve, true)
+	defer intrinsics.atomic_store(&ctx.in_resolve, false)
+
+	// NOTE(bill): Dbghelp is not thread-safe
+	win32.AcquireSRWLockExclusive(&ctx.impl.lock)
+	defer win32.ReleaseSRWLockExclusive(&ctx.impl.lock)
+
+	data: [size_of(win32.SYMBOL_INFOW) + size_of([256]win32.WCHAR)]byte
+	symbol := (^win32.SYMBOL_INFOW)(&data[0])
+	symbol.SizeOfStruct = size_of(win32.SYMBOL_INFOW)
+	symbol.MaxNameLen = 255
+	if win32.SymFromAddrW(ctx.impl.hProcess, win32.DWORD64(frame), &{}, symbol) {
+		fl.procedure, _ = win32.wstring_to_utf8(&symbol.Name[0], -1, allocator)
+	} else {
+		fl.procedure = fmt.aprintf("(procedure: 0x%x)", frame, allocator=allocator)
+	}
+
+	line: win32.IMAGEHLP_LINE64
+	line.SizeOfStruct = size_of(line)
+	if win32.SymGetLineFromAddrW64(ctx.impl.hProcess, win32.DWORD64(frame), &{}, &line) {
+		fl.file_path, _ = win32.wstring_to_utf8(line.FileName, -1, allocator)
+		fl.line = i32(line.LineNumber)
+	}
+
+	return
+}

+ 2 - 3
core/dynlib/doc.odin

@@ -1,6 +1,5 @@
-//+build ignore
 /*
-Package core:dynlib implements loading of shared libraries/DLLs and their symbols.
+Package `core:dynlib` implements loading of shared libraries/DLLs and their symbols.
 
 The behaviour of dynamically loaded libraries is specific to the target platform of the program.
 For in depth detail on the underlying behaviour please refer to your target platform's documentation.
@@ -8,4 +7,4 @@ For in depth detail on the underlying behaviour please refer to your target plat
 See `example` directory for an example library exporting 3 symbols and a host program loading them automatically
 by defining a symbol table struct.
 */
-package dynlib
+package dynlib

+ 1 - 1
core/dynlib/lib.odin

@@ -135,7 +135,7 @@ initialize_symbols :: proc(
 	prefixed_symbol_buf: [2048]u8 = ---
 
 	count = 0
-	for field, i in reflect.struct_fields_zipped(T) {
+	for field in reflect.struct_fields_zipped(T) {
 		// Calculate address of struct member
 		field_ptr := rawptr(uintptr(symbol_table) + field.offset)
 

+ 1 - 1
core/encoding/base32/base32.odin

@@ -1,4 +1,4 @@
-package base32
+package encoding_base32
 
 // @note(zh): Encoding utility for Base32
 // A secondary param can be used to supply a custom alphabet to

+ 125 - 48
core/encoding/base64/base64.odin

@@ -1,4 +1,8 @@
-package base64
+package encoding_base64
+
+import "core:io"
+import "core:mem"
+import "core:strings"
 
 // @note(zh): Encoding utility for Base64
 // A secondary param can be used to supply a custom alphabet to
@@ -39,59 +43,132 @@ DEC_TABLE := [128]int {
     49, 50, 51, -1, -1, -1, -1, -1,
 }
 
-encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string #no_bounds_check {
-    length := len(data)
-    if length == 0 {
-        return ""
-    }
+encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> (encoded: string, err: mem.Allocator_Error) #optional_allocator_error {
+	out_length := encoded_len(data)
+	if out_length == 0 {
+		return
+	}
+
+	out   := strings.builder_make(0, out_length, allocator) or_return
+	ioerr := encode_into(strings.to_stream(&out), data, ENC_TBL)
+
+	assert(ioerr == nil,                           "string builder should not IO error")
+	assert(strings.builder_cap(out) == out_length, "buffer resized, `encoded_len` was wrong")
+
+	return strings.to_string(out), nil
+}
+
+encode_into :: proc(w: io.Writer, data: []byte, ENC_TBL := ENC_TABLE) -> io.Error {
+	length := len(data)
+	if length == 0 {
+		return nil
+	}
+
+	c0, c1, c2, block: int
+	out: [4]byte
+	for i := 0; i < length; i += 3 {
+		#no_bounds_check {
+			c0, c1, c2 = int(data[i]), -1, -1
+
+			if i + 1 < length { c1 = int(data[i + 1]) }
+			if i + 2 < length { c2 = int(data[i + 2]) }
+
+			block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0)
+			
+			out[0] = ENC_TBL[block >> 18 & 63]
+			out[1] = ENC_TBL[block >> 12 & 63]
+			out[2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63]
+			out[3] = c2 == -1 ? PADDING : ENC_TBL[block & 63]
+		}
+		io.write_full(w, out[:]) or_return
+	}
+	return nil
+}
 
-    out_length := ((4 * length / 3) + 3) &~ 3
-    out := make([]byte, out_length, allocator)
+encoded_len :: proc(data: []byte) -> int {
+	length := len(data)
+	if length == 0 {
+		return 0
+	}
 
-    c0, c1, c2, block: int
+	return ((4 * length / 3) + 3) &~ 3
+}
 
-    for i, d := 0, 0; i < length; i, d = i + 3, d + 4 {
-        c0, c1, c2 = int(data[i]), -1, -1
+decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> (decoded: []byte, err: mem.Allocator_Error) #optional_allocator_error {
+	out_length := decoded_len(data)
 
-        if i + 1 < length { c1 = int(data[i + 1]) }
-        if i + 2 < length { c2 = int(data[i + 2]) }
+	out   := strings.builder_make(0, out_length, allocator) or_return
+	ioerr := decode_into(strings.to_stream(&out), data, DEC_TBL)
 
-        block = (c0 << 16) | (max(c1, 0) << 8) | max(c2, 0)
+	assert(ioerr == nil,                           "string builder should not IO error")
+	assert(strings.builder_cap(out) == out_length, "buffer resized, `decoded_len` was wrong")
+
+	return out.buf[:], nil
+}
 
-        out[d]     = ENC_TBL[block >> 18 & 63]
-        out[d + 1] = ENC_TBL[block >> 12 & 63]
-        out[d + 2] = c1 == -1 ? PADDING : ENC_TBL[block >> 6 & 63]
-        out[d + 3] = c2 == -1 ? PADDING : ENC_TBL[block & 63]
-    }
-    return string(out)
+decode_into :: proc(w: io.Writer, data: string, DEC_TBL := DEC_TABLE) -> io.Error {
+	length := decoded_len(data)
+	if length == 0 {
+		return nil
+	}
+
+	c0, c1, c2, c3: int
+	b0, b1, b2: int
+	buf: [3]byte
+	i, j: int
+	for ; j + 3 <= length; i, j = i + 4, j + 3 {
+		#no_bounds_check {
+			c0 = DEC_TBL[data[i]]
+			c1 = DEC_TBL[data[i + 1]]
+			c2 = DEC_TBL[data[i + 2]]
+			c3 = DEC_TBL[data[i + 3]]
+
+			b0 = (c0 << 2) | (c1 >> 4)
+			b1 = (c1 << 4) | (c2 >> 2)
+			b2 = (c2 << 6) | c3
+
+			buf[0] = byte(b0)
+			buf[1] = byte(b1)
+			buf[2] = byte(b2)
+		}
+
+		io.write_full(w, buf[:]) or_return
+	}
+
+	rest := length - j
+	if rest > 0 {
+		#no_bounds_check {
+			c0 = DEC_TBL[data[i]]
+			c1 = DEC_TBL[data[i + 1]]
+			c2 = DEC_TBL[data[i + 2]]
+
+			b0 = (c0 << 2) | (c1 >> 4)
+			b1 = (c1 << 4) | (c2 >> 2)
+		}
+
+		switch rest {
+		case 1: io.write_byte(w, byte(b0))             or_return
+		case 2: io.write_full(w, {byte(b0), byte(b1)}) or_return
+		}
+	}
+
+	return nil
 }
 
-decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check {
-    length := len(data)
-    if length == 0 {
-        return nil
-    }
-
-    pad_count := data[length - 1] == PADDING ? (data[length - 2] == PADDING ? 2 : 1) : 0
-    out_length := ((length * 6) >> 3) - pad_count
-    out := make([]byte, out_length, allocator)
-
-    c0, c1, c2, c3: int
-    b0, b1, b2: int
-
-    for i, j := 0, 0; i < length; i, j = i + 4, j + 3 {
-        c0 = DEC_TBL[data[i]]
-        c1 = DEC_TBL[data[i + 1]]
-        c2 = DEC_TBL[data[i + 2]]
-        c3 = DEC_TBL[data[i + 3]]
-
-        b0 = (c0 << 2) | (c1 >> 4)
-        b1 = (c1 << 4) | (c2 >> 2)
-        b2 = (c2 << 6) | c3
-
-        out[j]     = byte(b0)
-        out[j + 1] = byte(b1)
-        out[j + 2] = byte(b2)
-    }
-    return out
+decoded_len :: proc(data: string) -> int {
+	length := len(data)
+	if length == 0 {
+		return 0
+	}
+
+	padding: int
+	if data[length - 1] == PADDING {
+		if length > 1 && data[length - 2] == PADDING {
+			padding = 2
+		} else {
+			padding = 1
+		}
+	}
+
+	return ((length * 6) >> 3) - padding
 }
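
A short usage sketch of the reworked API (the program is illustrative, not part of the commit; the encoded value is plain Base64):

	package demo

	import base64 "core:encoding/base64"
	import "core:fmt"

	main :: proc() {
		encoded := base64.encode(transmute([]byte)string("hi")) // "aGk="
		decoded := base64.decode(encoded)                       // bytes of "hi"
		fmt.println(encoded, string(decoded))
	}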

+ 673 - 0
core/encoding/cbor/cbor.odin

@@ -0,0 +1,673 @@
+package encoding_cbor
+
+import "base:intrinsics"
+
+import "core:encoding/json"
+import "core:io"
+import "core:mem"
+import "core:strconv"
+import "core:strings"
+
+// If we are decoding a stream of either a map or list, the initial capacity will be this value.
+INITIAL_STREAMED_CONTAINER_CAPACITY :: 8
+
+// If we are decoding a stream of either text or bytes, the initial capacity will be this value.
+INITIAL_STREAMED_BYTES_CAPACITY :: 16
+
+// The default maximum amount of bytes to allocate on a buffer/container at once to prevent
+// malicious input from causing massive allocations.
+DEFAULT_MAX_PRE_ALLOC :: mem.Kilobyte
+
+// Known/common headers are defined here; headers that are not listed can still be valid.
+// The higher 3 bits are the major type and the lower 5 bits are the additional information.
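+// For example (illustrative): `U8` below is `(u8(Major.Unsigned) << 5) | u8(Add.One_Byte)`, i.e. `0 | 24 == 0x18`.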
+Header :: enum u8 {
+	U8  = (u8(Major.Unsigned) << 5) | u8(Add.One_Byte),
+	U16 = (u8(Major.Unsigned) << 5) | u8(Add.Two_Bytes),
+	U32 = (u8(Major.Unsigned) << 5) | u8(Add.Four_Bytes),
+	U64 = (u8(Major.Unsigned) << 5) | u8(Add.Eight_Bytes),
+
+	Neg_U8  = (u8(Major.Negative) << 5) | u8(Add.One_Byte),
+	Neg_U16 = (u8(Major.Negative) << 5) | u8(Add.Two_Bytes),
+	Neg_U32 = (u8(Major.Negative) << 5) | u8(Add.Four_Bytes),
+	Neg_U64 = (u8(Major.Negative) << 5) | u8(Add.Eight_Bytes),
+
+	False = (u8(Major.Other) << 5) | u8(Add.False),
+	True  = (u8(Major.Other) << 5) | u8(Add.True),
+
+	Nil       = (u8(Major.Other) << 5) | u8(Add.Nil),
+	Undefined = (u8(Major.Other) << 5) | u8(Add.Undefined),
+
+	Simple = (u8(Major.Other) << 5) | u8(Add.One_Byte),
+
+	F16 = (u8(Major.Other) << 5) | u8(Add.Two_Bytes),
+	F32 = (u8(Major.Other) << 5) | u8(Add.Four_Bytes),
+	F64 = (u8(Major.Other) << 5) | u8(Add.Eight_Bytes),
+
+	Break = (u8(Major.Other) << 5) | u8(Add.Break),
+}
+
+// The higher 3 bits of the header, which denote what type of value it is.
+Major :: enum u8 {
+	Unsigned,
+	Negative,
+	Bytes,
+	Text,
+	Array,
+	Map,
+	Tag,
+	Other,
+}
+
+// The lower 5 bits of the header, which denote additional information for the type of value.
+Add :: enum u8 {
+	False     = 20,
+	True      = 21,
+	Nil       = 22,
+	Undefined = 23,
+
+	One_Byte    = 24,
+	Two_Bytes   = 25,
+	Four_Bytes  = 26,
+	Eight_Bytes = 27,
+
+	Length_Unknown = 31,
+	Break          = Length_Unknown,
+}
+
+Value :: union {
+	u8,
+	u16,
+	u32,
+	u64,
+
+	Negative_U8,
+	Negative_U16,
+	Negative_U32,
+	Negative_U64,
+	
+	// Pointers so the size of the Value union stays small.
+	^Bytes,
+	^Text,
+	^Array,
+	^Map,
+	^Tag,
+
+	Simple,
+	f16,
+	f32,
+	f64,
+	bool,
+	Undefined,
+	Nil,
+}
+
+Bytes :: []byte
+Text :: string
+
+Array :: []Value
+
+Map :: []Map_Entry
+Map_Entry :: struct {
+	key:   Value, // Can be any unsigned, negative, float, Simple, bool, Text.
+	value: Value,
+}
+
+Tag :: struct {
+	number: Tag_Number,
+	value:  Value, // Value based on the number.
+}
+
+Tag_Number :: u64
+
+Nil       :: distinct rawptr
+Undefined :: distinct rawptr
+
+// A distinct atom-like number, range from `0..=19` and `32..=max(u8)`.
+Simple :: distinct u8
+Atom   :: Simple
+
+Unmarshal_Error :: union #shared_nil {
+	io.Error,
+	mem.Allocator_Error,
+	Decode_Data_Error,
+	Unmarshal_Data_Error,
+	Maybe(Unsupported_Type_Error),
+}
+
+Marshal_Error :: union #shared_nil {
+	io.Error,
+	mem.Allocator_Error,
+	Encode_Data_Error,
+	Marshal_Data_Error,
+	Maybe(Unsupported_Type_Error),
+}
+
+Decode_Error :: union #shared_nil {
+	io.Error,
+	mem.Allocator_Error,
+	Decode_Data_Error,
+}
+
+Encode_Error :: union #shared_nil {
+	io.Error,
+	mem.Allocator_Error,
+	Encode_Data_Error,
+}
+
+Decode_Data_Error :: enum {
+	None,
+	Bad_Major,                // An invalid major type was encountered.
+	Bad_Argument,             // A general unexpected value (most likely invalid additional info in header).
+	Bad_Tag_Value,            // When the type of value for the given tag is not valid.
+	Nested_Indefinite_Length, // When a streamed/indefinite-length container nests another; this is not allowed.
+	Nested_Tag,               // When a tag's value is another tag, this is not allowed.
+	Length_Too_Big,           // When the length of a container (map, array, bytes, string) is more than `max(int)`.
+	Disallowed_Streaming,     // When the `.Disallow_Streaming` flag is set and a streaming header is encountered.
+	Break,                    // When the `break` header was found without any stream to break off.
+}
+
+Encode_Data_Error :: enum {
+	None,
+	Invalid_Simple, // When a simple is being encoded that is out of the range `0..=19` and `32..=max(u8)`.
+	Int_Too_Big,    // When an int is being encoded that is larger than `max(u64)` or smaller than `min(u64)`.
+	Bad_Tag_Value,  // When the type of value is not supported by the tag implementation.
+}
+
+Unmarshal_Data_Error :: enum {
+	None,
+	Invalid_Parameter,     // When the given `any` can not be unmarshalled into.
+	Non_Pointer_Parameter, // When the given `any` is not a pointer.
+}
+
+Marshal_Data_Error :: enum {
+	None,
+	Invalid_CBOR_Tag, // When the struct tag `cbor_tag:""` is not a registered name or number.
+}
+
+// Error that is returned when a type couldn't be marshalled into or out of, as much information
+// as possible/available is added.
+Unsupported_Type_Error :: struct {
+	id:  typeid,
+	hdr: Header,
+	add: Add,
+}
+
+_unsupported :: proc(v: any, hdr: Header, add: Add = nil) -> Maybe(Unsupported_Type_Error) {
+	return Unsupported_Type_Error{
+		id = v.id,
+		hdr = hdr,
+		add = add,
+	}
+}
+
+// Actual value is `-1 - x` (be careful of overflows).
+
+Negative_U8  :: distinct u8
+Negative_U16 :: distinct u16
+Negative_U32 :: distinct u32
+Negative_U64 :: distinct u64
+
+// Turns the CBOR negative unsigned int type into a signed integer type.
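+// For example (illustrative): `negative_to_int(Negative_U8(0))` is `-1` and `negative_to_int(Negative_U8(255))` is `-256`.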
+negative_to_int :: proc {
+	negative_u8_to_int,
+	negative_u16_to_int,
+	negative_u32_to_int,
+	negative_u64_to_int,
+}
+
+negative_u8_to_int :: #force_inline proc(u: Negative_U8) -> i16 {
+	return -1 - i16(u)
+}
+
+negative_u16_to_int :: #force_inline proc(u: Negative_U16) -> i32 {
+	return -1 - i32(u)
+}
+
+negative_u32_to_int :: #force_inline proc(u: Negative_U32) -> i64 {
+	return -1 - i64(u)
+}
+
+negative_u64_to_int :: #force_inline proc(u: Negative_U64) -> i128 {
+	return -1 - i128(u)
+}
+
+// Utility for converting between the different errors when they are subsets of the other.
+err_conv :: proc {
+	encode_to_marshal_err,
+	encode_to_marshal_err_p2,
+	decode_to_unmarshal_err,
+	decode_to_unmarshal_err_p,
+	decode_to_unmarshal_err_p2,
+}
+
+encode_to_marshal_err :: #force_inline proc(err: Encode_Error) -> Marshal_Error {
+	switch e in err {
+	case nil:                 return nil
+	case io.Error:            return e
+	case mem.Allocator_Error: return e
+	case Encode_Data_Error:   return e
+	case:                     return nil
+	}
+}
+
+encode_to_marshal_err_p2 :: #force_inline proc(v: $T, v2: $T2, err: Encode_Error) -> (T, T2, Marshal_Error) {
+	return v, v2, err_conv(err)
+}
+
+decode_to_unmarshal_err :: #force_inline proc(err: Decode_Error) -> Unmarshal_Error {
+	switch e in err {
+	case nil:                 return nil
+	case io.Error:            return e
+	case mem.Allocator_Error: return e
+	case Decode_Data_Error:   return e
+	case:                     return nil
+	}
+}
+
+decode_to_unmarshal_err_p :: #force_inline proc(v: $T, err: Decode_Error) -> (T, Unmarshal_Error) {
+	return v, err_conv(err)
+}
+
+decode_to_unmarshal_err_p2 :: #force_inline proc(v: $T, v2: $T2, err: Decode_Error) -> (T, T2, Unmarshal_Error) {
+	return v, v2, err_conv(err)
+}
+
+// Recursively frees all memory allocated when decoding the passed value.
+destroy :: proc(val: Value, allocator := context.allocator) {
+	context.allocator = allocator
+	#partial switch v in val {
+	case ^Map:
+		if v == nil { return }
+		for entry in v {
+			destroy(entry.key)
+			destroy(entry.value)
+		}
+		delete(v^)
+		free(v)
+	case ^Array:
+		if v == nil { return }
+		for entry in v {
+			destroy(entry)
+		}
+		delete(v^)
+		free(v)
+	case ^Text:
+		if v == nil { return }
+		delete(v^)
+		free(v)
+	case ^Bytes:
+		if v == nil { return }
+		delete(v^)
+		free(v)
+	case ^Tag:
+		if v == nil { return }
+		destroy(v.value)
+		free(v)
+	}
+}
+
+/*
+to_diagnostic_format either writes or returns a human-readable representation of the value,
+optionally formatted, defined as the diagnostic format in [[RFC 8949 Section 8;https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation]].
+
+Incidentally, if the CBOR does not contain any of the additional types defined on top of JSON
+this will also be valid JSON.
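+
+Illustrative notation (not part of the commit): 42, -5, h'c0ffee', "text", simple(16), and 2(1363896240) for a tag.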
+*/
+to_diagnostic_format :: proc {
+	to_diagnostic_format_string,
+	to_diagnostic_format_writer,
+}
+
+// Turns the given CBOR value into a human-readable string.
+// See docs on the proc group `diagnose` for more info.
+to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator) -> (string, mem.Allocator_Error) #optional_allocator_error {
+	b := strings.builder_make(allocator)
+	w := strings.to_stream(&b)
+	err := to_diagnostic_format_writer(w, val, padding)
+	if err == .EOF {
+		// The string builder stream only returns .EOF, and only if it can't write (out of memory).
+		return "", .Out_Of_Memory
+	}
+	assert(err == nil)
+
+	return strings.to_string(b), nil
+}
+
+// Writes the given CBOR value into the writer as human-readable text.
+// See docs on the proc group `diagnose` for more info.
+to_diagnostic_format_writer :: proc(w: io.Writer, val: Value, padding := 0) -> io.Error {
+	@(require_results)
+	indent :: proc(padding: int) -> int {
+		padding := padding
+		if padding != -1 {
+			padding += 1
+		}
+		return padding
+	}
+
+	@(require_results)
+	dedent :: proc(padding: int) -> int {
+		padding := padding
+		if padding != -1 {
+			padding -= 1
+		}
+		return padding
+	}
+
+	comma :: proc(w: io.Writer, padding: int) -> io.Error {
+		_ = io.write_string(w, ", " if padding == -1 else ",") or_return
+		return nil
+	}
+
+	newline :: proc(w: io.Writer, padding: int) -> io.Error {
+		if padding != -1 {
+			io.write_string(w, "\n") or_return
+			for _ in 0..<padding {
+				io.write_string(w, "\t") or_return
+			}
+		}
+		return nil
+	}
+
+	padding := padding
+	switch v in val {
+	case u8:  io.write_uint(w, uint(v)) or_return
+	case u16: io.write_uint(w, uint(v)) or_return
+	case u32: io.write_uint(w, uint(v)) or_return
+	case u64: io.write_u64(w, v) or_return
+	case Negative_U8:  io.write_int(w, int(negative_to_int(v))) or_return
+	case Negative_U16: io.write_int(w, int(negative_to_int(v))) or_return
+	case Negative_U32: io.write_int(w, int(negative_to_int(v))) or_return
+	case Negative_U64: io.write_i128(w, i128(negative_to_int(v))) or_return
+
+	// NOTE: not using io.write_float because it removes the sign, 
+	// which we want for the diagnostic format.
+	case f16:
+		buf: [64]byte
+		str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f16), 8*size_of(f16))
+		if str[0] == '+' && str != "+Inf" { str = str[1:] }
+		io.write_string(w, str) or_return
+	case f32:
+		buf: [128]byte
+		str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f32), 8*size_of(f32))
+		if str[0] == '+' && str != "+Inf" { str = str[1:] }
+		io.write_string(w, str) or_return
+	case f64:
+		buf: [256]byte
+		str := strconv.append_float(buf[:], f64(v), 'f', 2*size_of(f64), 8*size_of(f64))
+		if str[0] == '+' && str != "+Inf" { str = str[1:] }
+		io.write_string(w, str) or_return
+
+	case bool: io.write_string(w, "true" if v else "false") or_return
+	case Nil: io.write_string(w, "nil") or_return
+	case Undefined: io.write_string(w, "undefined") or_return
+	case ^Bytes:
+		io.write_string(w, "h'") or_return
+		for b in v { io.write_int(w, int(b), 16) or_return }
+		io.write_string(w, "'") or_return
+	case ^Text:
+		io.write_string(w, `"`) or_return
+		io.write_string(w, v^) or_return
+		io.write_string(w, `"`) or_return
+	case ^Array:
+		if v == nil || len(v) == 0 {
+			io.write_string(w, "[]") or_return
+			return nil
+		}
+
+		io.write_string(w, "[") or_return
+
+		padding = indent(padding)
+		newline(w, padding) or_return
+
+		for entry, i in v {
+			to_diagnostic_format(w, entry, padding) or_return
+			if i != len(v)-1 {
+				comma(w, padding) or_return
+				newline(w, padding) or_return
+			}
+		}
+
+		padding = dedent(padding)
+		newline(w, padding) or_return
+
+		io.write_string(w, "]") or_return
+	case ^Map:
+		if v == nil || len(v) == 0 {
+			io.write_string(w, "{}") or_return
+			return nil
+		}
+
+		io.write_string(w, "{") or_return
+
+		padding = indent(padding)
+		newline(w, padding) or_return
+
+		for entry, i in v {
+			to_diagnostic_format(w, entry.key, padding) or_return
+			io.write_string(w, ": ") or_return
+			to_diagnostic_format(w, entry.value, padding) or_return
+			if i != len(v)-1 {
+				comma(w, padding) or_return
+				newline(w, padding) or_return
+			}
+		}
+
+		padding = dedent(padding)
+		newline(w, padding) or_return
+
+		io.write_string(w, "}") or_return
+	case ^Tag:
+		io.write_u64(w, v.number) or_return
+		io.write_string(w, "(") or_return
+		to_diagnostic_format(w, v.value, padding) or_return
+		io.write_string(w, ")") or_return
+	case Simple:
+		io.write_string(w, "simple(") or_return
+		io.write_uint(w, uint(v)) or_return
+		io.write_string(w, ")") or_return
+	}
+	return nil
+}
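+
+/*
+A minimal usage sketch: the default `padding` of 0 pretty-prints with newlines and tabs, while a
+`padding` of -1 keeps the whole value on one line. The value here is built and encoded inline just
+to have something to show.
+
+	val: Value = u64(42)
+	data, _ := encode(val)
+	defer delete(data)
+
+	decoded, _ := decode(string(data))
+	defer destroy(decoded)
+
+	pretty  := to_diagnostic_format_string(decoded)               // Multi-line, tab indented.
+	compact := to_diagnostic_format_string(decoded, padding = -1) // Everything on one line.
+	defer delete(pretty)
+	defer delete(compact)
+*/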
+
+/*
+Converts from JSON to CBOR.
+
+Everything is copied to the given allocator; the passed-in JSON value can be deleted afterwards.
+*/
+from_json :: proc(val: json.Value, allocator := context.allocator) -> (Value, mem.Allocator_Error) #optional_allocator_error {
+	internal :: proc(val: json.Value) -> (ret: Value, err: mem.Allocator_Error) {
+		switch v in val {
+		case json.Null: return Nil{}, nil
+		case json.Integer:
+			i, major := _int_to_uint(v)
+			#partial switch major {
+			case .Unsigned: return i, nil
+			case .Negative: return Negative_U64(i), nil
+			case:           unreachable()
+			}
+		case json.Float:   return v, nil
+		case json.Boolean: return v, nil
+		case json.String:
+			container := new(Text) or_return
+
+			// We need the string to have a nil byte at the end so we clone to cstring.
+			container^ = string(strings.clone_to_cstring(v) or_return)
+			return container, nil
+		case json.Array:
+			arr  := new(Array) or_return
+			arr^  = make([]Value, len(v)) or_return
+			for _, i in arr {
+				arr[i] = internal(v[i]) or_return
+			}
+			return arr, nil
+		case json.Object:
+			m  := new(Map) or_return
+			dm := make([dynamic]Map_Entry, 0, len(v)) or_return
+			for mkey, mval in v {
+				append(&dm, Map_Entry{from_json(mkey) or_return, from_json(mval) or_return})
+			}
+			m^ = dm[:]
+			return m, nil
+		}
+		return nil, nil
+	}
+
+	context.allocator = allocator
+	return internal(val)
+}
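+
+/*
+A minimal sketch of the JSON to CBOR direction, using an inline JSON literal:
+
+	json_text := `{"name": "odin", "answer": 42}`
+
+	js, perr := json.parse(transmute([]byte)json_text)
+	assert(perr == nil)
+	defer json.destroy_value(js)
+
+	val, verr := from_json(js)
+	assert(verr == nil)
+	defer destroy(val)
+*/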
+
+/*
+Converts from CBOR to JSON.
+
+NOTE: overflow on integers or floats is not handled.
+
+Everything is copied to the given allocator; the passed-in CBOR value can be `destroy`'ed afterwards.
+
+If a CBOR map with non-string keys is encountered, it is turned into an array of tuples.
+*/
+to_json :: proc(val: Value, allocator := context.allocator) -> (json.Value, mem.Allocator_Error) #optional_allocator_error {
+	internal :: proc(val: Value) -> (ret: json.Value, err: mem.Allocator_Error) {
+		switch v in val {
+		case Simple: return json.Integer(v), nil
+
+		case u8:  return json.Integer(v), nil
+		case u16: return json.Integer(v), nil
+		case u32: return json.Integer(v), nil
+		case u64: return json.Integer(v), nil
+
+		case Negative_U8:  return json.Integer(negative_to_int(v)), nil
+		case Negative_U16: return json.Integer(negative_to_int(v)), nil
+		case Negative_U32: return json.Integer(negative_to_int(v)), nil
+		case Negative_U64: return json.Integer(negative_to_int(v)), nil
+
+		case f16: return json.Float(v), nil
+		case f32: return json.Float(v), nil
+		case f64: return json.Float(v), nil
+
+		case bool: return json.Boolean(v), nil
+
+		case Undefined: return json.Null{}, nil
+		case Nil:       return json.Null{}, nil
+
+		case ^Bytes: return json.String(strings.clone(string(v^)) or_return), nil
+		case ^Text:  return json.String(strings.clone(v^) or_return),         nil
+
+		case ^Map:
+			keys_all_strings :: proc(m: ^Map) -> bool {
+				for entry in m {
+					#partial switch kv in entry.key {
+					case ^Bytes:
+					case ^Text:
+					case: return false
+					}
+				}
+				return true
+			}
+
+			if keys_all_strings(v) {
+				obj := make(json.Object, len(v)) or_return
+				for entry in v {
+					k: string
+					#partial switch kv in entry.key {
+					case ^Bytes: k = string(kv^)
+					case ^Text:  k = kv^
+					case:        unreachable()
+					}
+
+					v := internal(entry.value) or_return
+					obj[k] = v
+				}
+				return obj, nil
+			} else {
+				// Resort to an array of tuples if keys aren't all strings.
+				arr := make(json.Array, 0, len(v)) or_return
+				for entry in v {
+					entry_arr := make(json.Array, 0, 2) or_return
+					append(&entry_arr, internal(entry.key) or_return) or_return
+					append(&entry_arr, internal(entry.value) or_return) or_return
+					append(&arr, entry_arr) or_return
+				}
+				return arr, nil
+			}
+
+		case ^Array:
+			arr := make(json.Array, 0, len(v)) or_return
+			for entry in v {
+				append(&arr, internal(entry) or_return) or_return
+			}
+			return arr, nil
+
+		case ^Tag:
+			obj := make(json.Object, 2) or_return
+			obj[strings.clone("number") or_return] = internal(v.number) or_return
+			obj[strings.clone("value") or_return]  = internal(v.value) or_return
+			return obj, nil
+
+		case: return json.Null{}, nil
+		}
+	}
+
+	context.allocator = allocator
+	return internal(val)
+}
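+
+/*
+The reverse direction, sketched with `val` assumed to be a `Value` obtained from `decode` or `from_json`:
+
+	js, jerr := to_json(val)
+	assert(jerr == nil)
+	defer json.destroy_value(js)
+
+	// Maps whose keys are not all strings come back as arrays of [key, value] tuples.
+*/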
+
+_int_to_uint :: proc {
+	_i8_to_uint,
+	_i16_to_uint,
+	_i32_to_uint,
+	_i64_to_uint,
+	_i128_to_uint,
+}
+
+_u128_to_u64 :: #force_inline proc(v: u128) -> (u64, Encode_Data_Error) {
+	if v > u128(max(u64)) {
+		return 0, .Int_Too_Big
+	}
+
+	return u64(v), nil
+}
+
+_i8_to_uint :: #force_inline proc(v: i8) -> (u: u8, m: Major) {
+	if v < 0 {
+		return u8(abs(v)-1), .Negative
+	}
+
+	return u8(v), .Unsigned
+}
+
+_i16_to_uint :: #force_inline proc(v: i16) -> (u: u16, m: Major) {
+	if v < 0 {
+		return u16(abs(v)-1), .Negative
+	}
+
+	return u16(v), .Unsigned
+}
+
+_i32_to_uint :: #force_inline proc(v: i32) -> (u: u32, m: Major) {
+	if v < 0 {
+		return u32(abs(v)-1), .Negative
+	}
+
+	return u32(v), .Unsigned
+}
+
+_i64_to_uint :: #force_inline proc(v: i64) -> (u: u64, m: Major) {
+	if v < 0 {
+		return u64(abs(v)-1), .Negative
+	}
+
+	return u64(v), .Unsigned
+}
+
+_i128_to_uint :: proc(v: i128) -> (u: u64, m: Major, err: Encode_Data_Error) {
+	if v < 0 {
+		m = .Negative
+		u, err = _u128_to_u64(u128(abs(v) - 1))
+		return
+	}
+
+	m = .Unsigned
+	u, err = _u128_to_u64(u128(v))
+	return
+}

+ 886 - 0
core/encoding/cbor/coding.odin

@@ -0,0 +1,886 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:bytes"
+import "core:encoding/endian"
+import "core:io"
+import "core:slice"
+import "core:strings"
+
+Encoder_Flag :: enum {
+	// CBOR defines a tag header that also acts as a file/binary header,
+	// this way decoders can check the first header of the binary and see if it is CBOR.
+	Self_Described_CBOR,
+
+	// Integers are stored in the smallest integer type it fits.
+	// This involves checking each int against the max of all its smaller types.
+	Deterministic_Int_Size,
+
+	// Floats are stored in the smallest size float type without losing precision.
+	// This involves casting each float down to its smaller types and checking if it changed.
+	Deterministic_Float_Size,
+
+	// Sort maps by their keys in bytewise lexicographic order of their deterministic encoding.
+	// NOTE: In order to do this, all keys of a map have to be pre-computed, sorted, and
+	// then written, this involves temporary allocations for the keys and a copy of the map itself.
+	Deterministic_Map_Sorting, 
+}
+
+Encoder_Flags :: bit_set[Encoder_Flag]
+
+// Flags for fully deterministic output (if you are not using streaming/indeterminate length).
+ENCODE_FULLY_DETERMINISTIC :: Encoder_Flags{.Deterministic_Int_Size, .Deterministic_Float_Size, .Deterministic_Map_Sorting}
+
+// Flags for the smallest encoding output.
+ENCODE_SMALL :: Encoder_Flags{.Deterministic_Int_Size, .Deterministic_Float_Size}
+
+Encoder :: struct {
+	flags:          Encoder_Flags,
+	writer:         io.Writer,
+	temp_allocator: runtime.Allocator,
+}
+
+Decoder_Flag :: enum {
+	// Rejects (with an error `.Disallowed_Streaming`) when a streaming CBOR header is encountered.
+	Disallow_Streaming,
+
+	// Pre-allocates buffers and containers with the size that was set in the CBOR header.
+	// This should only be enabled when you control both ends of the encoding, if you don't,
+	// attackers can craft input that causes massive (`max(u64)`) byte allocations for a few bytes of
+	// CBOR.
+	Trusted_Input,
+	
+	// Makes the decoder shrink excess capacity from allocated buffers/containers before returning.
+	Shrink_Excess,
+}
+
+Decoder_Flags :: bit_set[Decoder_Flag]
+
+Decoder :: struct {
+	// The max amount of bytes allowed to pre-allocate when `.Trusted_Input` is not set on the
+	// flags.
+	max_pre_alloc: int,
+
+	flags:  Decoder_Flags,
+	reader: io.Reader,
+}
+
+/*
+Decodes both deterministic and non-deterministic CBOR into a `Value` variant.
+
+`Text` and `Bytes` can safely be cast to cstrings because of an added 0 byte.
+
+Allocations are done using the given allocator;
+*no* allocations are done on the `context.temp_allocator`.
+
+A value can be (fully and recursively) deallocated using the `destroy` proc in this package.
+
+Disable streaming/indeterminate lengths with the `.Disallow_Streaming` flag.
+
+Shrink excess bytes in buffers and containers with the `.Shrink_Excess` flag.
+
+Mark the input as trusted with the `.Trusted_Input` flag; this turns off the safety feature
+of not pre-allocating more than `max_pre_alloc` bytes before reading. You should only
+do this when you own both sides of the encoding and are sure the input can't contain
+malicious bytes.
+*/
+decode_from :: proc {
+	decode_from_string,
+	decode_from_reader,
+	decode_from_decoder,
+}
+decode :: decode_from
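+
+/*
+A minimal sketch of decoding with flags, using freshly encoded bytes as input:
+
+	one: Value = u64(1)
+	data, _ := encode(one)
+	defer delete(data)
+
+	// Reject indefinite-length (streamed) items and trim excess capacity of what gets allocated.
+	val, err := decode(string(data), {.Disallow_Streaming, .Shrink_Excess})
+	assert(err == nil)
+	defer destroy(val)
+*/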
+
+// Decodes the given string as CBOR.
+// See docs on the proc group `decode` for more information.
+decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	r: strings.Reader
+	strings.reader_init(&r, s)
+	return decode_from_reader(strings.reader_to_stream(&r), flags, allocator)
+}
+
+// Reads a CBOR value from the given reader.
+// See docs on the proc group `decode` for more information.
+decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	return decode_from_decoder(
+		Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r },
+		allocator=allocator,
+	)
+}
+
+// Reads a CBOR value from the given decoder.
+// See docs on the proc group `decode` for more information.
+decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
+	context.allocator = allocator
+	
+	d := d
+
+	if d.max_pre_alloc <= 0 {
+		d.max_pre_alloc = DEFAULT_MAX_PRE_ALLOC
+	}
+
+	v, err = _decode_from_decoder(d)
+	// A normal EOF does not exist here; we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value, err: Decode_Error) {
+	hdr := hdr
+	r := d.reader
+	if hdr == Header(0) { hdr = _decode_header(r) or_return }
+	switch hdr {
+	case .U8:  return _decode_u8 (r)
+	case .U16: return _decode_u16(r)
+	case .U32: return _decode_u32(r)
+	case .U64: return _decode_u64(r)
+
+	case .Neg_U8:  return Negative_U8 (_decode_u8 (r) or_return), nil
+	case .Neg_U16: return Negative_U16(_decode_u16(r) or_return), nil
+	case .Neg_U32: return Negative_U32(_decode_u32(r) or_return), nil
+	case .Neg_U64: return Negative_U64(_decode_u64(r) or_return), nil
+
+	case .Simple: return _decode_simple(r)
+
+	case .F16: return _decode_f16(r)
+	case .F32: return _decode_f32(r)
+	case .F64: return _decode_f64(r)
+
+	case .True:  return true, nil
+	case .False: return false, nil
+	
+	case .Nil:       return Nil{}, nil
+	case .Undefined: return Undefined{}, nil
+
+	case .Break: return nil, .Break
+	}
+
+	maj, add := _header_split(hdr)
+	switch maj {
+	case .Unsigned: return _decode_tiny_u8(add)
+	case .Negative: return Negative_U8(_decode_tiny_u8(add) or_return), nil
+	case .Bytes:    return _decode_bytes_ptr(d, add)
+	case .Text:     return _decode_text_ptr(d, add)
+	case .Array:    return _decode_array_ptr(d, add)
+	case .Map:      return _decode_map_ptr(d, add)
+	case .Tag:      return _decode_tag_ptr(d, add)
+	case .Other:    return _decode_tiny_simple(add)
+	case:           return nil, .Bad_Major
+	}
+}
+
+/*
+Encodes the CBOR value into a binary CBOR.
+
+Flags can be used to control the output (mainly determinism, which coincidentally affects size).
+
+The default flags `ENCODE_SMALL` (`.Deterministic_Int_Size`, `.Deterministic_Float_Size`) will try
+to put ints and floats into their smallest possible byte size without losing equality.
+
+Adding the `.Self_Described_CBOR` flag will wrap the value in a tag that lets generic decoders know
+the contents are CBOR from just reading the first byte.
+
+Adding the `.Deterministic_Map_Sorting` flag will sort the encoded maps by the byte content of the
+encoded key. This flag has a cost on performance and memory efficiency because all keys in a map
+have to be precomputed, sorted and only then written to the output.
+
+Empty flags will do nothing extra to the value.
+
+The allocations for the `.Deterministic_Map_Sorting` flag are done using the given temp_allocator,
+but are followed by the necessary `delete` and `free` calls if the allocator supports them.
+This is helpful when the CBOR size is so big that you don't want to collect all the temporary
+allocations until the end.
+*/
+encode_into :: proc {
+	encode_into_bytes,
+	encode_into_builder,
+	encode_into_writer,
+	encode_into_encoder,
+}
+encode :: encode_into
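+
+/*
+A minimal sketch of encoding a hand-built `Value`; the container and string variants are pointers,
+so they are allocated with `new` here so that `destroy` can free them afterwards:
+
+	text := new(Text)
+	text^ = Text(strings.clone("hello"))
+
+	arr := new(Array)
+	arr^ = make(Array, 2)
+	arr[0] = text
+	arr[1] = u64(1970)
+
+	val: Value = arr
+	defer destroy(val)
+
+	data, err := encode(val, ENCODE_FULLY_DETERMINISTIC)
+	assert(err == nil)
+	defer delete(data)
+*/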
+
+// Encodes the CBOR value into binary CBOR allocated on the given allocator.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (data: []byte, err: Encode_Error) {
+	b := strings.builder_make(allocator) or_return
+	encode_into_builder(&b, v, flags, temp_allocator) or_return
+	return b.buf[:], nil
+}
+
+// Encodes the CBOR value into binary CBOR written to the given builder.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
+	return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator)
+}
+
+// Encodes the CBOR value into binary CBOR written to the given writer.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
+	return encode_into_encoder(Encoder{flags, w, temp_allocator}, v)
+}
+
+// Encodes the CBOR value into binary CBOR written to the given encoder.
+// See the docs on the proc group `encode_into` for more info.
+encode_into_encoder :: proc(e: Encoder, v: Value) -> Encode_Error {
+	e := e
+
+	if e.temp_allocator.procedure == nil {
+		e.temp_allocator = context.temp_allocator
+	}
+
+	if .Self_Described_CBOR in e.flags {
+		_encode_u64(e, TAG_SELF_DESCRIBED_CBOR, .Tag) or_return
+		e.flags &~= { .Self_Described_CBOR }
+	}
+
+	switch v_spec in v {
+	case u8:           return _encode_u8(e.writer, v_spec, .Unsigned)
+	case u16:          return _encode_u16(e, v_spec, .Unsigned)
+	case u32:          return _encode_u32(e, v_spec, .Unsigned)
+	case u64:          return _encode_u64(e, v_spec, .Unsigned)
+	case Negative_U8:  return _encode_u8(e.writer, u8(v_spec), .Negative)
+	case Negative_U16: return _encode_u16(e, u16(v_spec), .Negative)
+	case Negative_U32: return _encode_u32(e, u32(v_spec), .Negative)
+	case Negative_U64: return _encode_u64(e, u64(v_spec), .Negative)
+	case ^Bytes:       return _encode_bytes(e, v_spec^)
+	case ^Text:        return _encode_text(e, v_spec^)
+	case ^Array:       return _encode_array(e, v_spec^)
+	case ^Map:         return _encode_map(e, v_spec^)
+	case ^Tag:         return _encode_tag(e, v_spec^)
+	case Simple:       return _encode_simple(e.writer, v_spec)
+	case f16:          return _encode_f16(e.writer, v_spec)
+	case f32:          return _encode_f32(e, v_spec)
+	case f64:          return _encode_f64(e, v_spec)
+	case bool:         return _encode_bool(e.writer, v_spec)
+	case Nil:          return _encode_nil(e.writer)
+	case Undefined:    return _encode_undefined(e.writer)
+	case:              return nil
+	}
+}
+
+_decode_header :: proc(r: io.Reader) -> (hdr: Header, err: io.Error) {
+	hdr = Header(_decode_u8(r) or_return)
+	return
+}
+
+_header_split :: proc(hdr: Header) -> (Major, Add) {
+	return Major(u8(hdr) >> 5), Add(u8(hdr) & 0x1f)
+}
+
+_decode_u8 :: proc(r: io.Reader) -> (v: u8, err: io.Error) {
+	byte: [1]byte = ---
+	io.read_full(r, byte[:]) or_return
+	return byte[0], nil
+}
+
+_encode_uint :: proc {
+	_encode_u8,
+	_encode_u16,
+	_encode_u32,
+	_encode_u64,
+}
+
+_encode_u8 :: proc(w: io.Writer, v: u8, major: Major = .Unsigned) -> (err: io.Error) {
+	header := u8(major) << 5
+	if v < u8(Add.One_Byte) {
+		header |= v
+		_, err = io.write_full(w, {header})
+		return
+	}
+
+	header |= u8(Add.One_Byte)
+	_, err = io.write_full(w, {header, v})
+	return
+}
+
+_decode_tiny_u8 :: proc(additional: Add) -> (u8, Decode_Data_Error) {
+	if additional < .One_Byte {
+		return u8(additional), nil
+	}
+
+	return 0, .Bad_Argument
+}
+
+_decode_u16 :: proc(r: io.Reader) -> (v: u16, err: io.Error) {
+	bytes: [2]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u16be(bytes[:]), nil
+}
+
+_encode_u16 :: proc(e: Encoder, v: u16, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u16_exact(e.writer, v, major)
+}
+
+_encode_u16_exact :: proc(w: io.Writer, v: u16, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [3]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Two_Bytes)
+	endian.unchecked_put_u16be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_decode_u32 :: proc(r: io.Reader) -> (v: u32, err: io.Error) {
+	bytes: [4]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u32be(bytes[:]), nil
+}
+
+_encode_u32 :: proc(e: Encoder, v: u32, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u32_exact(e.writer, v, major)
+}
+
+_encode_u32_exact :: proc(w: io.Writer, v: u32, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [5]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Four_Bytes)
+	endian.unchecked_put_u32be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_decode_u64 :: proc(r: io.Reader) -> (v: u64, err: io.Error) {
+	bytes: [8]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	return endian.unchecked_get_u64be(bytes[:]), nil
+}
+
+_encode_u64 :: proc(e: Encoder, v: u64, major: Major = .Unsigned) -> Encode_Error {
+	if .Deterministic_Int_Size in e.flags {
+		return _encode_deterministic_uint(e.writer, v, major)
+	}
+	return _encode_u64_exact(e.writer, v, major)
+}
+
+_encode_u64_exact :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> (err: io.Error) {
+	bytes: [9]byte = ---
+	bytes[0] = (u8(major) << 5) | u8(Add.Eight_Bytes)
+	endian.unchecked_put_u64be(bytes[1:], v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes) -> (v: ^Bytes, err: Decode_Error) {
+	v = new(Bytes) or_return
+	defer if err != nil { free(v) }
+
+	v^ = _decode_bytes(d, add, type) or_return
+	return
+}
+
+_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator) -> (v: Bytes, err: Decode_Error) {
+	context.allocator = allocator
+
+	add := add
+	n, scap := _decode_len_str(d, add) or_return
+	
+	buf := strings.builder_make(0, scap) or_return
+	defer if err != nil { strings.builder_destroy(&buf) }
+	buf_stream := strings.to_stream(&buf)
+
+	if n == -1 {
+		indefinite_loop: for {
+			header := _decode_header(d.reader) or_return
+			maj: Major
+			maj, add = _header_split(header)
+			#partial switch maj {
+			case type:
+				iter_n, iter_cap := _decode_len_str(d, add) or_return
+				if iter_n == -1 {
+					return nil, .Nested_Indefinite_Length
+				}
+				reserve(&buf.buf, len(buf.buf) + iter_cap) or_return
+				io.copy_n(buf_stream, d.reader, i64(iter_n)) or_return
+
+			case .Other:
+				if add != .Break { return nil, .Bad_Argument }
+				break indefinite_loop
+
+			case:
+				return nil, .Bad_Major
+			}
+		}
+	} else {
+		io.copy_n(buf_stream, d.reader, i64(n)) or_return
+	}
+
+	// Write a zero byte so this can be converted to a cstring.
+	strings.write_byte(&buf, 0)
+
+	if .Shrink_Excess in d.flags { shrink(&buf.buf) }
+
+	// Take the slice last: the append and shrink above may reallocate the backing buffer.
+	// The zero byte sits just past the end of the returned slice.
+	v = buf.buf[:len(buf.buf)-1]
+	return
+}
+
+_encode_bytes :: proc(e: Encoder, val: Bytes, major: Major = .Bytes) -> (err: Encode_Error) {
+	assert(len(val) >= 0)
+	_encode_u64(e, u64(len(val)), major) or_return
+    _, err = io.write_full(e.writer, val[:])
+	return
+}
+
+_decode_text_ptr :: proc(d: Decoder, add: Add) -> (v: ^Text, err: Decode_Error) {
+	v = new(Text) or_return
+	defer if err != nil { free(v) }
+
+	v^ = _decode_text(d, add) or_return
+	return
+}
+
+_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator) -> (v: Text, err: Decode_Error) {
+	return (Text)(_decode_bytes(d, add, .Text, allocator) or_return), nil
+}
+
+_encode_text :: proc(e: Encoder, val: Text) -> Encode_Error {
+    return _encode_bytes(e, transmute([]byte)val, .Text)
+}
+
+_decode_array_ptr :: proc(d: Decoder, add: Add) -> (v: ^Array, err: Decode_Error) {
+	v = new(Array) or_return
+	defer if err != nil { free(v) }
+
+	v^ = _decode_array(d, add) or_return
+	return
+}
+
+_decode_array :: proc(d: Decoder, add: Add) -> (v: Array, err: Decode_Error) {
+	n, scap := _decode_len_container(d, add) or_return
+	array := make([dynamic]Value, 0, scap) or_return
+	defer if err != nil {
+		for entry in array { destroy(entry) }
+		delete(array)
+	}
+	
+	for i := 0; n == -1 || i < n; i += 1 {
+		val, verr := _decode_from_decoder(d)
+		if n == -1 && verr == .Break {
+			break
+		} else if verr != nil {
+			err = verr
+			return
+		}
+
+		append(&array, val) or_return
+	}
+
+	if .Shrink_Excess in d.flags { shrink(&array) }
+	
+	v = array[:]
+	return
+}
+
+_encode_array :: proc(e: Encoder, arr: Array) -> Encode_Error {
+	assert(len(arr) >= 0)
+	_encode_u64(e, u64(len(arr)), .Array) or_return
+	for val in arr {
+		encode(e, val) or_return
+	}
+	return nil
+}
+
+_decode_map_ptr :: proc(d: Decoder, add: Add) -> (v: ^Map, err: Decode_Error) {
+	v = new(Map) or_return
+	defer if err != nil { free(v) }
+
+	v^ = _decode_map(d, add) or_return
+	return
+}
+
+_decode_map :: proc(d: Decoder, add: Add) -> (v: Map, err: Decode_Error) {
+	n, scap := _decode_len_container(d, add) or_return
+	items := make([dynamic]Map_Entry, 0, scap) or_return
+	defer if err != nil { 
+		for entry in items {
+			destroy(entry.key)
+			destroy(entry.value)
+		}
+		delete(items)
+	}
+
+	for i := 0; n == -1 || i < n; i += 1 {
+		key, kerr := _decode_from_decoder(d)
+		if n == -1 && kerr == .Break {
+			break
+		} else if kerr != nil {
+			return nil, kerr
+		} 
+
+		value := _decode_from_decoder(d) or_return
+
+		append(&items, Map_Entry{
+			key   = key,
+			value = value,
+		}) or_return
+	}
+
+	if .Shrink_Excess in d.flags { shrink(&items) }
+	
+	v = items[:]
+	return
+}
+
+_encode_map :: proc(e: Encoder, m: Map) -> (err: Encode_Error) {
+	assert(len(m) >= 0)
+	_encode_u64(e, u64(len(m)), .Map) or_return
+	
+	if .Deterministic_Map_Sorting not_in e.flags {
+		for entry in m {
+			encode(e, entry.key)   or_return
+			encode(e, entry.value) or_return
+		}
+		return
+	}
+
+	// Deterministic_Map_Sorting needs us to sort the entries by the byte contents of the
+	// encoded key.
+	//
+	// This means we have to store and sort them before writing, incurring extra (temporary) allocations.
+
+	Map_Entry_With_Key :: struct {
+		encoded_key: []byte,
+		entry:       Map_Entry,
+	}
+
+	entries := make([]Map_Entry_With_Key, len(m), e.temp_allocator) or_return
+	defer delete(entries, e.temp_allocator)
+
+	for &entry, i in entries {
+		entry.entry = m[i]
+
+		buf := strings.builder_make(e.temp_allocator) or_return
+		
+		ke := e
+		ke.writer = strings.to_stream(&buf)
+
+		encode(ke, entry.entry.key) or_return
+		entry.encoded_key = buf.buf[:]
+	}
+	
+	// Sort lexicographic on the bytes of the key.
+	slice.sort_by_cmp(entries, proc(a, b: Map_Entry_With_Key) -> slice.Ordering {
+		return slice.Ordering(bytes.compare(a.encoded_key, b.encoded_key))
+	})
+
+	for entry in entries {
+		io.write_full(e.writer, entry.encoded_key) or_return
+		delete(entry.encoded_key, e.temp_allocator)
+
+		encode(e, entry.entry.value) or_return
+	}
+
+    return nil
+}
+
+_decode_tag_ptr :: proc(d: Decoder, add: Add) -> (v: Value, err: Decode_Error) {
+	tag := _decode_tag(d, add) or_return
+	if t, ok := tag.?; ok {
+		defer if err != nil { destroy(t.value) }
+		tp := new(Tag) or_return
+		tp^ = t
+		return tp, nil
+	}
+
+	// No error and no tag: this was the self-described CBOR tag, skip it and decode the wrapped value.
+	return _decode_from_decoder(d)
+}
+
+_decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error) {
+	num := _decode_uint_as_u64(d.reader, add) or_return
+
+	// CBOR can be wrapped in a tag that decoders can use to see/check if the binary data is CBOR.
+	// We can ignore it here.
+	if num == TAG_SELF_DESCRIBED_CBOR {
+		return
+	}
+
+	t := Tag{
+		number = num,
+		value = _decode_from_decoder(d) or_return,
+	}
+
+	if nested, ok := t.value.(^Tag); ok {
+		destroy(nested)
+		return nil, .Nested_Tag
+	}
+
+	return t, nil
+}
+
+_decode_uint_as_u64 :: proc(r: io.Reader, add: Add) -> (nr: u64, err: Decode_Error) {
+	#partial switch add {
+	case .One_Byte:    return u64(_decode_u8(r) or_return), nil
+	case .Two_Bytes:   return u64(_decode_u16(r) or_return), nil
+	case .Four_Bytes:  return u64(_decode_u32(r) or_return), nil
+	case .Eight_Bytes: return u64(_decode_u64(r) or_return), nil
+	case:              return u64(_decode_tiny_u8(add) or_return), nil
+	}
+}
+
+_encode_tag :: proc(e: Encoder, val: Tag) -> Encode_Error {
+	_encode_u64(e, val.number, .Tag) or_return
+    return encode(e, val.value)
+}
+
+_decode_simple :: proc(r: io.Reader) -> (v: Simple, err: io.Error) {
+	buf: [1]byte = ---
+	io.read_full(r, buf[:]) or_return
+	return Simple(buf[0]), nil
+}
+
+_encode_simple :: proc(w: io.Writer, v: Simple) -> (err: Encode_Error) {
+	header := u8(Major.Other) << 5
+
+	if v < Simple(Add.False) {
+		header |= u8(v)
+		_, err = io.write_full(w, {header})
+		return
+	} else if v <= Simple(Add.Break) {
+		return .Invalid_Simple
+	}
+	
+	header |= u8(Add.One_Byte)
+	_, err = io.write_full(w, {header, u8(v)})
+	return
+}
+
+_decode_tiny_simple :: proc(add: Add) -> (Simple, Decode_Data_Error) {
+	if add < Add.False {
+		return Simple(add), nil
+	}
+	
+	return 0, .Bad_Argument
+}
+
+_decode_f16 :: proc(r: io.Reader) -> (v: f16, err: io.Error) {
+	bytes: [2]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u16be(bytes[:])
+	return transmute(f16)n, nil
+}
+
+_encode_f16 :: proc(w: io.Writer, v: f16) -> (err: io.Error) {
+	bytes: [3]byte = ---
+	bytes[0] = u8(Header.F16)
+	endian.unchecked_put_u16be(bytes[1:], transmute(u16)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_decode_f32 :: proc(r: io.Reader) -> (v: f32, err: io.Error) {
+	bytes: [4]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u32be(bytes[:])
+	return transmute(f32)n, nil
+}
+
+_encode_f32 :: proc(e: Encoder, v: f32) -> io.Error {
+	if .Deterministic_Float_Size in e.flags {
+		return _encode_deterministic_float(e.writer, v)
+	}
+	return _encode_f32_exact(e.writer, v)
+}
+
+_encode_f32_exact :: proc(w: io.Writer, v: f32) -> (err: io.Error) {
+	bytes: [5]byte = ---
+	bytes[0] = u8(Header.F32)
+	endian.unchecked_put_u32be(bytes[1:], transmute(u32)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_decode_f64 :: proc(r: io.Reader) -> (v: f64, err: io.Error) {
+	bytes: [8]byte = ---
+	io.read_full(r, bytes[:]) or_return
+	n := endian.unchecked_get_u64be(bytes[:])
+	return transmute(f64)n, nil
+}
+
+_encode_f64 :: proc(e: Encoder, v: f64) -> io.Error {
+	if .Deterministic_Float_Size in e.flags {
+		return _encode_deterministic_float(e.writer, v)
+	}
+	return _encode_f64_exact(e.writer, v)
+}
+
+_encode_f64_exact :: proc(w: io.Writer, v: f64) -> (err: io.Error) {
+	bytes: [9]byte = ---
+	bytes[0] = u8(Header.F64)
+	endian.unchecked_put_u64be(bytes[1:], transmute(u64)v)
+	_, err = io.write_full(w, bytes[:])
+	return
+}
+
+_encode_bool :: proc(w: io.Writer, v: bool) -> (err: io.Error) {
+	switch v {
+	case true:  _, err = io.write_full(w, {u8(Header.True )}); return
+	case false: _, err = io.write_full(w, {u8(Header.False)}); return
+	case:       unreachable()
+	}
+}
+
+_encode_undefined :: proc(w: io.Writer) -> io.Error {
+	_, err := io.write_full(w, {u8(Header.Undefined)})
+	return err
+}
+
+_encode_nil :: proc(w: io.Writer) -> io.Error {
+	_, err := io.write_full(w, {u8(Header.Nil)})
+	return err
+}
+
+// Streaming
+
+encode_stream_begin :: proc(w: io.Writer, major: Major) -> (err: io.Error) {
+	assert(major >= Major(.Bytes) && major <= Major(.Map), "illegal stream type")
+
+	header := (u8(major) << 5) | u8(Add.Length_Unknown)
+	_, err = io.write_full(w, {header})
+	return
+}
+
+encode_stream_end :: proc(w: io.Writer) -> io.Error {
+	header := (u8(Major.Other) << 5) | u8(Add.Break)
+	_, err := io.write_full(w, {header})
+	return err
+}
+
+encode_stream_bytes      :: _encode_bytes
+encode_stream_text       :: _encode_text
+encode_stream_array_item :: encode
+
+encode_stream_map_entry :: proc(e: Encoder, key: Value, val: Value) -> Encode_Error {
+	encode(e, key) or_return
+	return encode(e, val)
+}
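+
+/*
+A minimal sketch of streamed (indefinite-length) encoding into a `strings.Builder`; the resulting
+bytes decode fine with `decode` as long as the `.Disallow_Streaming` flag is not set:
+
+	stream_example :: proc() -> Encode_Error {
+		b := strings.builder_make()
+		defer strings.builder_destroy(&b)
+
+		w := strings.to_stream(&b)
+		e := Encoder{ENCODE_SMALL, w, context.temp_allocator}
+
+		encode_stream_begin(w, .Map) or_return
+		encode_stream_map_entry(e, u64(1), u64(100)) or_return
+		encode_stream_map_entry(e, u64(2), u64(200)) or_return
+		encode_stream_end(w) or_return
+
+		// b.buf[:] now holds the CBOR encoding of {1: 100, 2: 200} with an indefinite-length header.
+		return nil
+	}
+*/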
+
+// For `Bytes` and `Text` strings: Decodes the number of items the header says follows.
+// If the number is not specified, -1 is returned and streaming should be initiated.
+// A suitable starting capacity is also returned for a buffer that is allocated by the caller.
+_decode_len_str :: proc(d: Decoder, add: Add) -> (n: int, scap: int, err: Decode_Error) {
+	if add == .Length_Unknown {
+		if .Disallow_Streaming in d.flags {
+			return -1, -1, .Disallowed_Streaming
+		}
+		return -1, INITIAL_STREAMED_BYTES_CAPACITY, nil
+	}
+
+	_n := _decode_uint_as_u64(d.reader, add) or_return
+	if _n > u64(max(int)) { return -1, -1, .Length_Too_Big }
+	n = int(_n)
+
+	scap = n + 1 // Space for zero byte.
+	if .Trusted_Input not_in d.flags {
+		scap = min(d.max_pre_alloc, scap)
+	}
+
+	return
+}
+
+// For `Array` and `Map` types: Decodes the number of items the header says follows.
+// If the number is not specified, -1 is returned and streaming should be initiated.
+// A suitable starting capacity is also returned for a buffer that is allocated by the caller.
+_decode_len_container :: proc(d: Decoder, add: Add) -> (n: int, scap: int, err: Decode_Error) {
+	if add == .Length_Unknown {
+		if .Disallow_Streaming in d.flags {
+			return -1, -1, .Disallowed_Streaming
+		}
+		return -1, INITIAL_STREAMED_CONTAINER_CAPACITY, nil
+	}
+
+	_n := _decode_uint_as_u64(d.reader, add) or_return
+	if _n > u64(max(int)) { return -1, -1, .Length_Too_Big }
+	n = int(_n)
+
+	scap = n
+	if .Trusted_Input not_in d.flags {
+		// NOTE: if this is a map it will be twice this.
+		scap = min(d.max_pre_alloc / size_of(Value), scap)
+	}
+
+	return
+}
+
+// Deterministic encoding is (among other things) encoding all values into their smallest
+// possible representation.
+// See section 4 of RFC 8949.
+
+_encode_deterministic_uint :: proc {
+	_encode_u8,
+	_encode_deterministic_u16,
+	_encode_deterministic_u32,
+	_encode_deterministic_u64,
+	_encode_deterministic_u128,
+}
+
+_encode_deterministic_u16 :: proc(w: io.Writer, v: u16, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u16(max(u8)): return _encode_u8(w, u8(v), major)
+	case:                   return _encode_u16_exact(w, v, major)
+	}
+}
+
+_encode_deterministic_u32 :: proc(w: io.Writer, v: u32, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u32(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u32(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case:                    return _encode_u32_exact(w, u32(v), major)
+	}
+}
+
+_encode_deterministic_u64 :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u64(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u64(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case v <= u64(max(u32)): return _encode_u32_exact(w, u32(v), major)
+	case:                    return _encode_u64_exact(w, u64(v), major)
+	}
+}
+
+_encode_deterministic_u128 :: proc(w: io.Writer, v: u128, major: Major = .Unsigned) -> Encode_Error {
+	switch {
+	case v <= u128(max(u8)):  return _encode_u8(w, u8(v), major)
+	case v <= u128(max(u16)): return _encode_u16_exact(w, u16(v), major)
+	case v <= u128(max(u32)): return _encode_u32_exact(w, u32(v), major)
+	case v <= u128(max(u64)): return _encode_u64_exact(w, u64(v), major)
+	case:                     return .Int_Too_Big
+	}
+}
+
+_encode_deterministic_negative :: #force_inline proc(w: io.Writer, v: $T) -> Encode_Error
+	where T == Negative_U8 || T == Negative_U16 || T == Negative_U32 || T == Negative_U64 {
+	return _encode_deterministic_uint(w, v, .Negative)
+}
+
+// A Deterministic float is a float in the smallest type that stays the same after down casting.
+_encode_deterministic_float :: proc {
+	_encode_f16,
+	_encode_deterministic_f32,
+	_encode_deterministic_f64,
+}
+
+_encode_deterministic_f32 :: proc(w: io.Writer, v: f32) -> io.Error {
+	if (f32(f16(v)) == v) {
+		return _encode_f16(w, f16(v))
+	}
+
+	return _encode_f32_exact(w, v)
+}
+
+_encode_deterministic_f64 :: proc(w: io.Writer, v: f64) -> io.Error {
+	if (f64(f16(v)) == v) {
+		return _encode_f16(w, f16(v))
+	}
+
+	if (f64(f32(v)) == v) {
+		return _encode_f32_exact(w, f32(v))
+	}
+
+	return _encode_f64_exact(w, v)
+}

+ 141 - 0
core/encoding/cbor/doc.odin

@@ -0,0 +1,141 @@
+/*
+Package cbor encodes, decodes, marshals and unmarshals types from/into RFC 8949 compatible CBOR binary.
+Also provided are conversion to and from JSON and the CBOR diagnostic format.
+
+**Allocations:**
+
+In general, when the list below says allocations are done on the `temp_allocator`, those allocations
+are still followed by the matching deallocation calls.
+This allows you to pass an allocator that actually supports freeing as the `temp_allocator`, which is handy with big CBOR.
+
+- *Encoding*:  If the `.Deterministic_Map_Sorting` flag is set on the encoder, this allocates on the given `temp_allocator`
+               some space for the keys of maps in order to sort them and then write them.
+               Other than that there are no allocations (only for the final bytes if you use `cbor.encode_into_bytes`).
+
+- *Decoding*:  Allocates everything on the given allocator and input given can be deleted after decoding.
+               *No* temporary allocations are done.
+
+- *Marshal*:   Same allocation strategy as encoding.
+
+- *Unmarshal*: Allocates everything on the given allocator and input given can be deleted after unmarshalling.
+               Some temporary allocations are done on the given `temp_allocator`.
+
+**Determinism:**
+
+CBOR defines a deterministic en/decoder, which among other things uses the smallest type possible for integers and floats,
+and sorts map keys by their (encoded) lexical bytewise order.
+
+You can enable this behaviour using a combination of flags, also available as the `cbor.ENCODE_FULLY_DETERMINISTIC` constant.
+If you just want the small size that comes with this, but not the map sorting (which has a performance cost) you can use the
+`cbor.ENCODE_SMALL` constant for the flags.
+
+A deterministic float is a float in the smallest type (f16, f32, f64) that hasn't changed after conversion.
+A deterministic integer is an integer in the smallest representation (u8, u16, u32, u64) it fits in.
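+
+For example, `300` does not fit in a `u8` and is therefore stored with a two-byte argument, while `1.5`
+survives a round trip through `f16` and is stored as an `f16`; `0.1` does not round-trip through `f16` or
+`f32` and stays an `f64`.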
+
+**Untrusted Input:**
+
+By default, input is treated as untrusted; this means the sizes that are encoded in the CBOR are not blindly trusted.
+If you were to trust these sizes and allocate space for them up front, an attacker would be able to cause massive allocations with small payloads.
+
+The decoder has a `max_pre_alloc` field that specifies the maximum amount of bytes (roughly) to pre-allocate, a KiB by default.
+
+This does mean reallocations are more common; if you know the input is trusted, you can add the `.Trusted_Input` flag to the decoder.
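+
+For example, a decoder that caps pre-allocation at 4KiB and also shrinks excess capacity could be
+set up like this (a minimal sketch, with `r` being any `io.Reader`):
+
+	d := cbor.Decoder{
+		max_pre_alloc = 4096,
+		flags         = {.Shrink_Excess},
+		reader        = r,
+	}
+	val, err := cbor.decode(d)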
+
+**Tags:**
+
+CBOR describes tags that you can wrap values with to assign a number to describe what type of data will follow.
+
+More information and a list of default tags can be found here: [[RFC 8949 Section 3.4;https://www.rfc-editor.org/rfc/rfc8949.html#name-tagging-of-items]].
+
+A list of registered extension types can be found here: [[IANA CBOR assignments;https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml]].
+
+Tags can either be assigned to a distinct Odin type (used by default),
+or be used with struct tags (`cbor_tag:"base64"`, or `cbor_tag:"1"` for example).
+
+By default, the following tags are supported/provided by this implementation:
+
+- *1/epoch*:   Assign this tag to `time.Time` or integer fields to use the defined seconds since epoch format.
+
+- *24/cbor*:   Assign this tag to string or byte fields to store encoded CBOR (not decoding it).
+
+- *34/base64*: Assign this tag to string or byte fields to store and decode the contents in base64.
+
+- *2 & 3*:     Used automatically by the implementation to encode and decode big numbers into/from `core:math/big`.
+
+- *55799*:     Self described CBOR, used when `.Self_Described_CBOR` flag is used to wrap the entire binary.
+               This shows other implementations that we are dealing with CBOR by just looking at the first byte of input.
+
+- *1010*:      An extension tag that defines a string type followed by its value; this implementation uses it to support Odin's unions.
+
+Users can provide their own tag implementations using the `cbor.tag_register_type(...)` to register a tag for a distinct Odin type
+used automatically when it is encountered during marshal and unmarshal.
+Or with `cbor.tag_register_number(...)` to register a tag number along with an identifier for convenience that can be used with struct tags,
+e.g. `cbor_tag:"69"` or `cbor_tag:"my_tag"`.
+
+You can look at the default tags provided for pointers on how these implementations work.
+
+Example:
+	package main
+
+	import "core:encoding/cbor"
+	import "core:fmt"
+	import "core:time"
+
+	Possibilities :: union {
+		string,
+		int,
+	}
+
+	Data :: struct {
+		str: string,
+		neg: cbor.Negative_U16,            // Store a CBOR value directly.
+		now: time.Time `cbor_tag:"epoch"`, // Wrapped in the epoch tag.
+		ignore_this: ^Data `cbor:"-"`,     // Ignored by implementation.
+		renamed: f32 `cbor:"renamed :)"`,  // Renamed when encoded.
+		my_union: Possibilities,           // Union support.
+	}
+
+	main :: proc() {
+		now := time.Time{_nsec = 1701117968 * 1e9}
+
+		data := Data{
+			str         = "Hello, World!",
+			neg         = 300,
+			now         = now,
+			ignore_this = &Data{},
+			renamed     = 123123.125,
+			my_union    = 3,
+		}
+		
+		// Marshal the struct into binary CBOR.
+		binary, err := cbor.marshal(data, cbor.ENCODE_FULLY_DETERMINISTIC)
+		assert(err == nil)
+		defer delete(binary)
+		
+		// Decode the binary data into a `cbor.Value`.
+		decoded, derr := cbor.decode(string(binary))
+		assert(derr == nil)
+		defer cbor.destroy(decoded)
+
+		// Turn the CBOR into a human readable representation defined as the diagnostic format in [[RFC 8949 Section 8;https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation]].
+		diagnosis, eerr := cbor.to_diagnostic_format(decoded)
+		assert(eerr == nil)
+		defer delete(diagnosis)
+
+		fmt.println(diagnosis)
+	}
+
+Output:
+	{
+		"my_union": 1010([
+			"int",
+			3
+		]),
+		"neg": -301,
+		"now": 1(1701117968),
+		"renamed :)": 123123.12500000,
+		"str": "Hello, World!"
+	}
+*/
+package encoding_cbor
+

+ 575 - 0
core/encoding/cbor/marshal.odin

@@ -0,0 +1,575 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:bytes"
+import "core:io"
+import "core:mem"
+import "core:reflect"
+import "core:slice"
+import "core:strconv"
+import "core:strings"
+import "core:unicode/utf8"
+
+/*
+Marshal a value into binary CBOR.
+
+Flags can be used to control the output (mainly determinism, which coincidentally affects size).
+
+The default flags `ENCODE_SMALL` (`.Deterministic_Int_Size`, `.Deterministic_Float_Size`) will try
+to put ints and floats into their smallest possible byte size without losing equality.
+
+Adding the `.Self_Described_CBOR` flag will wrap the value in a tag that lets generic decoders know
+the contents are CBOR from just reading the first byte.
+
+Adding the `.Deterministic_Map_Sorting` flag will sort the encoded maps by the byte content of the
+encoded key. This flag has a cost on performance and memory efficiency because all keys in a map
+have to be precomputed, sorted and only then written to the output.
+
+Empty flags will do nothing extra to the value.
+
+The allocations for the `.Deterministic_Map_Sorting` flag are done using the given `temp_allocator`,
+but are followed by the necessary `delete` and `free` calls if the allocator supports them.
+This is helpful when the CBOR size is so big that you don't want to collect all the temporary
+allocations until the end.
+*/
+marshal_into :: proc {
+	marshal_into_bytes,
+	marshal_into_builder,
+	marshal_into_writer,
+	marshal_into_encoder,
+}
+
+marshal :: marshal_into
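+
+/*
+A minimal sketch of marshalling into a caller-owned builder instead of a freshly allocated byte
+slice; any Odin value can be passed, a fixed array is used here:
+
+	b := strings.builder_make()
+	defer strings.builder_destroy(&b)
+
+	if err := marshal_into_builder(&b, [3]int{1, 2, 3}); err != nil {
+		// Handle the error.
+	}
+	// strings.to_string(b) now holds the CBOR encoding of the array.
+*/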
+
+// Marshals the given value into a CBOR byte stream (allocated using the given allocator).
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (bytes: []byte, err: Marshal_Error) {
+	b, alloc_err := strings.builder_make(allocator)
+	// The builder as a stream also returns .EOF if it ran out of memory, so this is consistent.
+	if alloc_err != nil {
+		return nil, .EOF
+	}
+
+	defer if err != nil { strings.builder_destroy(&b) }
+
+	if err = marshal_into_builder(&b, v, flags, temp_allocator); err != nil {
+		return
+	}
+
+	return b.buf[:], nil
+}
+
+// Marshals the given value into a CBOR byte stream written to the given builder.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+	return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator)
+}
+
+// Marshals the given value into a CBOR byte stream written to the given writer.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
+	encoder := Encoder{flags, w, temp_allocator}
+	return marshal_into_encoder(encoder, v)
+}
+
+// Marshals the given value into a CBOR byte stream written to the given encoder.
+// See docs on the `marshal_into` proc group for more info.
+marshal_into_encoder :: proc(e: Encoder, v: any) -> (err: Marshal_Error) {
+	e := e
+
+	if e.temp_allocator.procedure == nil {
+		e.temp_allocator = context.temp_allocator
+	}
+
+	if .Self_Described_CBOR in e.flags {
+		err_conv(_encode_u64(e, TAG_SELF_DESCRIBED_CBOR, .Tag)) or_return
+		e.flags &~= { .Self_Described_CBOR }
+	}
+
+	if v == nil {
+		return _encode_nil(e.writer)
+	}
+	
+	// Check if type has a tag implementation to use.
+	if impl, ok := _tag_implementations_type[v.id]; ok {
+		return impl->marshal(e, v)
+	}
+
+	ti := runtime.type_info_base(type_info_of(v.id))
+	a := any{v.data, ti.id}
+
+	#partial switch info in ti.variant {
+	case runtime.Type_Info_Named:
+		unreachable()
+
+	case runtime.Type_Info_Pointer:
+		switch vv in v {
+		case Undefined: return _encode_undefined(e.writer)
+		case Nil:       return _encode_nil(e.writer)
+		}
+
+	case runtime.Type_Info_Integer:
+		switch vv in v {
+		case Simple:       return err_conv(_encode_simple(e.writer, vv))
+		case Negative_U8:  return _encode_u8(e.writer, u8(vv), .Negative)
+		case Negative_U16: return err_conv(_encode_u16(e, u16(vv), .Negative))
+		case Negative_U32: return err_conv(_encode_u32(e, u32(vv), .Negative))
+		case Negative_U64: return err_conv(_encode_u64(e, u64(vv), .Negative))
+		}
+
+		switch i in a {
+		case i8:      return _encode_uint(e.writer, _int_to_uint(i))
+		case i16:     return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i32:     return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i64:     return err_conv(_encode_uint(e, _int_to_uint(i)))
+		case i128:    return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+		case int:     return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+
+		case u8:      return _encode_uint(e.writer, i)
+		case u16:     return err_conv(_encode_uint(e, i))
+		case u32:     return err_conv(_encode_uint(e, i))
+		case u64:     return err_conv(_encode_uint(e, i))
+		case u128:    return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+		case uint:    return err_conv(_encode_uint(e, u64(i)))
+		case uintptr: return err_conv(_encode_uint(e, u64(i)))
+
+		case i16le:  return err_conv(_encode_uint(e, _int_to_uint(i16(i))))
+		case i32le:  return err_conv(_encode_uint(e, _int_to_uint(i32(i))))
+		case i64le:  return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+		case i128le: return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+
+		case u16le:  return err_conv(_encode_uint(e, u16(i)))
+		case u32le:  return err_conv(_encode_uint(e, u32(i)))
+		case u64le:  return err_conv(_encode_uint(e, u64(i)))
+		case u128le: return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+
+		case i16be:  return err_conv(_encode_uint(e, _int_to_uint(i16(i))))
+		case i32be:  return err_conv(_encode_uint(e, _int_to_uint(i32(i))))
+		case i64be:  return err_conv(_encode_uint(e, _int_to_uint(i64(i))))
+		case i128be: return err_conv(_encode_uint(e, _int_to_uint(i128(i)) or_return))
+
+		case u16be:  return err_conv(_encode_uint(e, u16(i)))
+		case u32be:  return err_conv(_encode_uint(e, u32(i)))
+		case u64be:  return err_conv(_encode_uint(e, u64(i)))
+		case u128be: return err_conv(_encode_uint(e, _u128_to_u64(u128(i)) or_return))
+		}
+
+	case runtime.Type_Info_Rune:
+		buf, w := utf8.encode_rune(a.(rune))
+		return err_conv(_encode_text(e, string(buf[:w])))
+
+	case runtime.Type_Info_Float:
+		switch f in a {
+		case f16: return _encode_f16(e.writer, f)
+		case f32: return _encode_f32(e, f)
+		case f64: return _encode_f64(e, f)
+
+		case f16le: return _encode_f16(e.writer, f16(f))
+		case f32le: return _encode_f32(e, f32(f))
+		case f64le: return _encode_f64(e, f64(f))
+
+		case f16be: return _encode_f16(e.writer, f16(f))
+		case f32be: return _encode_f32(e, f32(f))
+		case f64be: return _encode_f64(e, f64(f))
+		}
+
+	case runtime.Type_Info_Complex:
+		switch z in a {
+		case complex32:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		case complex64:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		case complex128:
+			arr: [2]Value = {real(z), imag(z)}
+			return err_conv(_encode_array(e, arr[:]))
+		}
+
+	case runtime.Type_Info_Quaternion:
+		switch q in a {
+		case quaternion64:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		case quaternion128:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		case quaternion256:
+			arr: [4]Value = {imag(q), jmag(q), kmag(q), real(q)}
+			return err_conv(_encode_array(e, arr[:]))
+		}
+
+	case runtime.Type_Info_String:
+		switch s in a {
+		case string:  return err_conv(_encode_text(e, s))
+		case cstring: return err_conv(_encode_text(e, string(s)))
+		}
+
+	case runtime.Type_Info_Boolean:
+		switch b in a {
+		case bool: return _encode_bool(e.writer, b)
+		case b8:   return _encode_bool(e.writer, bool(b))
+		case b16:  return _encode_bool(e.writer, bool(b))
+		case b32:  return _encode_bool(e.writer, bool(b))
+		case b64:  return _encode_bool(e.writer, bool(b))
+		}
+
+	case runtime.Type_Info_Array:
+		if info.elem.id == byte {
+			raw := ([^]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw[:info.count]))
+		}
+
+		err_conv(_encode_u64(e, u64(info.count), .Array)) or_return
+		for i in 0..<info.count {
+			data := uintptr(v.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Enumerated_Array:
+		// index := runtime.type_info_base(info.index).variant.(runtime.Type_Info_Enum)
+		err_conv(_encode_u64(e, u64(info.count), .Array)) or_return
+		for i in 0..<info.count {
+			data := uintptr(v.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+		
+	case runtime.Type_Info_Dynamic_Array:
+		if info.elem.id == byte {
+			raw := (^[dynamic]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw[:]))
+		}
+
+		array := (^mem.Raw_Dynamic_Array)(v.data)
+		err_conv(_encode_u64(e, u64(array.len), .Array)) or_return
+		for i in 0..<array.len {
+			data := uintptr(array.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Slice:
+		if info.elem.id == byte {
+			raw := (^[]byte)(v.data)
+			return err_conv(_encode_bytes(e, raw^))
+		}
+
+		array := (^mem.Raw_Slice)(v.data)
+		err_conv(_encode_u64(e, u64(array.len), .Array)) or_return
+		for i in 0..<array.len {
+			data := uintptr(array.data) + uintptr(i*info.elem_size)
+			marshal_into(e, any{rawptr(data), info.elem.id}) or_return
+		}
+		return
+
+	case runtime.Type_Info_Map:
+		m := (^mem.Raw_Map)(v.data)
+		err_conv(_encode_u64(e, u64(runtime.map_len(m^)), .Map)) or_return
+		if m != nil {
+			if info.map_info == nil {
+				return _unsupported(v.id, nil)
+			}
+
+			map_cap := uintptr(runtime.map_cap(m^))
+			ks, vs, hs, _, _ := runtime.map_kvh_data_dynamic(m^, info.map_info)
+
+			if .Deterministic_Map_Sorting not_in e.flags {
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key   := rawptr(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, bucket_index))
+
+					marshal_into(e, any{ key, info.key.id }) or_return
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+
+				return
+			}
+
+			// Deterministic_Map_Sorting needs us to sort the entries by the byte contents of the
+			// encoded key.
+			//
+			// This means we have to store and sort them before writing, incurring extra (temporary) allocations.
+			//
+			// If the map key is a `string` or `cstring` we only allocate space for a dynamic array of entries
+			// we sort.
+			//
+			// If the map key is of another type we also allocate space for encoding the key into.
+
+			// To sort a string/cstring we need to first sort by their encoded header/length.
+			// This fits in 9 bytes at most.
+			pre_key :: #force_inline proc(e: Encoder, str: string) -> (res: [10]byte) {
+				e := e
+				builder := strings.builder_from_slice(res[:])
+				e.writer = strings.to_stream(&builder)
+
+				assert(_encode_u64(e, u64(len(str)), .Text) == nil)
+				res[9] = u8(len(builder.buf))
+				assert(res[9] < 10)
+				return
+			}
+
+			Encoded_Entry_Fast :: struct($T: typeid) {
+				pre_key: [10]byte,
+				key:     T,
+				val_idx: uintptr,
+			}
+
+			Encoded_Entry :: struct {
+				key:     ^[dynamic]byte,
+				val_idx: uintptr,
+			}
+
+			switch info.key.id {
+			case string:
+				entries := make([dynamic]Encoded_Entry_Fast(^[]byte), 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := (^[]byte)(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					append(&entries, Encoded_Entry_Fast(^[]byte){
+						pre_key = pre_key(e, string(key^)),
+						key     = key,
+						val_idx = bucket_index,
+					})
+				}
+
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry_Fast(^[]byte)) -> slice.Ordering {
+					a, b := a, b
+					pre_cmp := slice.Ordering(bytes.compare(a.pre_key[:a.pre_key[9]], b.pre_key[:b.pre_key[9]]))
+					if pre_cmp != .Equal {
+						return pre_cmp
+					}
+
+					return slice.Ordering(bytes.compare(a.key^, b.key^))
+				})
+				
+				for &entry in entries {
+					io.write_full(e.writer, entry.pre_key[:entry.pre_key[9]]) or_return
+					io.write_full(e.writer, entry.key^) or_return
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+
+			case cstring:
+				entries := make([dynamic]Encoded_Entry_Fast(^cstring), 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := (^cstring)(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					append(&entries, Encoded_Entry_Fast(^cstring){
+						pre_key = pre_key(e, string(key^)),
+						key     = key,
+						val_idx = bucket_index,
+					})
+				}
+
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry_Fast(^cstring)) -> slice.Ordering {
+					a, b := a, b
+					pre_cmp := slice.Ordering(bytes.compare(a.pre_key[:a.pre_key[9]], b.pre_key[:b.pre_key[9]]))
+					if pre_cmp != .Equal {
+						return pre_cmp
+					}
+
+					ab := transmute([]byte)string(a.key^)
+					bb := transmute([]byte)string(b.key^)
+					return slice.Ordering(bytes.compare(ab, bb))
+				})
+
+				for &entry in entries {
+					io.write_full(e.writer, entry.pre_key[:entry.pre_key[9]]) or_return
+					io.write_full(e.writer, transmute([]byte)string(entry.key^)) or_return
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+
+			case:
+				entries := make([dynamic]Encoded_Entry, 0, map_cap, e.temp_allocator) or_return
+				defer delete(entries)
+
+				for bucket_index in 0..<map_cap {
+					runtime.map_hash_is_valid(hs[bucket_index]) or_continue
+
+					key := rawptr(runtime.map_cell_index_dynamic(ks, info.map_info.ks, bucket_index))
+					key_builder := strings.builder_make(0, 8, e.temp_allocator) or_return
+					marshal_into(Encoder{e.flags, strings.to_stream(&key_builder), e.temp_allocator}, any{ key, info.key.id }) or_return
+					append(&entries, Encoded_Entry{ &key_builder.buf, bucket_index }) or_return
+				}
+
+				slice.sort_by_cmp(entries[:], proc(a, b: Encoded_Entry) -> slice.Ordering {
+					return slice.Ordering(bytes.compare(a.key[:], b.key[:]))
+				})
+
+				for entry in entries {
+					io.write_full(e.writer, entry.key[:]) or_return
+					delete(entry.key^)
+
+					value := rawptr(runtime.map_cell_index_dynamic(vs, info.map_info.vs, entry.val_idx))
+					marshal_into(e, any{ value, info.value.id }) or_return
+				}
+				return
+			}
+		}
+
+	case runtime.Type_Info_Struct:
+		switch vv in v {
+		case Tag: return err_conv(_encode_tag(e, vv))
+		}
+
+		field_name :: #force_inline proc(info: runtime.Type_Info_Struct, i: int) -> string {
+			if cbor_name := string(reflect.struct_tag_get(reflect.Struct_Tag(info.tags[i]), "cbor")); cbor_name != "" {
+				return cbor_name
+			} else {
+				return info.names[i]
+			}
+		}
+
+		marshal_entry :: #force_inline proc(e: Encoder, info: runtime.Type_Info_Struct, v: any, name: string, i: int) -> Marshal_Error {
+			err_conv(_encode_text(e, name)) or_return
+
+			id := info.types[i].id
+			data := rawptr(uintptr(v.data) + info.offsets[i])
+			field_any := any{data, id}
+			
+			if tag := string(reflect.struct_tag_get(reflect.Struct_Tag(info.tags[i]), "cbor_tag")); tag != "" {
+				if impl, ok := _tag_implementations_id[tag]; ok {
+					return impl->marshal(e, field_any)
+				}
+
+				nr, ok := strconv.parse_u64_of_base(tag, 10)
+				if !ok { return .Invalid_CBOR_Tag }
+				
+				if impl, nok := _tag_implementations_nr[nr]; nok {
+					return impl->marshal(e, field_any)
+				}
+				
+				err_conv(_encode_u64(e, nr, .Tag)) or_return
+			}
+
+			return marshal_into(e, field_any)
+		}
+		
+		n: u64; {
+			for _, i in info.names {
+				if field_name(info, i) != "-" {
+					n += 1
+				}
+			}
+			err_conv(_encode_u64(e, n, .Map)) or_return
+		}
+
+		if .Deterministic_Map_Sorting in e.flags {
+			Name :: struct {
+				name:  string,
+				field: int,
+			}
+			entries := make([dynamic]Name, 0, n, e.temp_allocator) or_return
+			defer delete(entries)
+
+			for _, i in info.names {
+				fname := field_name(info, i)
+				if fname == "-" {
+					continue
+				}
+
+				append(&entries, Name{fname, i}) or_return
+			}
+
+			// Sort lexicographic on the bytes of the key.
+			slice.sort_by_cmp(entries[:], proc(a, b: Name) -> slice.Ordering {
+				return slice.Ordering(bytes.compare(transmute([]byte)a.name, transmute([]byte)b.name))
+			})
+
+			for entry in entries {
+				marshal_entry(e, info, v, entry.name, entry.field) or_return
+			}
+		} else {
+			for _, i in info.names {
+				fname := field_name(info, i)
+				if fname == "-" {
+					continue
+				}
+
+				marshal_entry(e, info, v, fname, i) or_return
+			}
+		}
+		return
+
+	case runtime.Type_Info_Union:
+		switch vv in v {
+		case Value: return err_conv(encode(e, vv))
+		}
+
+		id := reflect.union_variant_typeid(v)
+		if v.data == nil || id == nil {
+			return _encode_nil(e.writer)
+		}
+
+		if len(info.variants) == 1 {
+			return marshal_into(e, any{v.data, id})
+		}
+
+		// Encode a non-nil multi-variant union as the `TAG_OBJECT_TYPE`,
+		// which is a tag wrapping an array where the first element is the textual id/type of the
+		// object that follows it.
+
+		err_conv(_encode_u16(e, TAG_OBJECT_TYPE, .Tag)) or_return
+		_encode_u8(e.writer, 2, .Array) or_return
+
+		vti := reflect.union_variant_type_info(v)
+		#partial switch vt in vti.variant {
+		case reflect.Type_Info_Named:
+			err_conv(_encode_text(e, vt.name)) or_return
+		case:
+			builder := strings.builder_make(e.temp_allocator) or_return
+			defer strings.builder_destroy(&builder)
+			reflect.write_type(&builder, vti)
+			err_conv(_encode_text(e, strings.to_string(builder))) or_return
+		}
+
+		return marshal_into(e, any{v.data, vti.id})
+
+	case runtime.Type_Info_Enum:
+		return marshal_into(e, any{v.data, info.base.id})
+
+	case runtime.Type_Info_Bit_Set:
+		// Store bit_set as big endian just like the protocol.
+		do_byte_swap := !reflect.bit_set_is_big_endian(v)
+		switch ti.size * 8 {
+		case  0:
+			return _encode_u8(e.writer, 0)
+		case  8:
+			x := (^u8)(v.data)^
+			return _encode_u8(e.writer, x)
+		case 16:
+			x := (^u16)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u16(e, x))
+		case 32:
+			x := (^u32)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u32(e, x))
+		case 64:
+			x := (^u64)(v.data)^
+			if do_byte_swap { x = intrinsics.byte_swap(x) }
+			return err_conv(_encode_u64(e, x))
+		case:
+			panic("unknown bit_set size")
+		}
+	}
+
+	return _unsupported(v.id, nil)
+}
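The struct case above resolves field names through the `cbor` struct tag: a non-empty tag value renames the encoded key and `-` omits the field. Below is a minimal sketch of how this is meant to be used from calling code; the package-level `cbor.marshal` entry point and its `([]byte, Marshal_Error)` return are assumptions based on marshal.odin earlier in this diff, and the `Account` type is made up.

package cbor_struct_tag_sketch

import "core:encoding/cbor"

// Hypothetical type: `cbor:"..."` renames the key in the encoded map,
// `cbor:"-"` drops the field from the output entirely.
Account :: struct {
	id:       u64    `cbor:"account_id"`,
	nickname: string,                    // encoded under its Odin field name
	password: string `cbor:"-"`,         // never written
}

// Assumed package-level entry point; see marshal.odin for the actual signature.
encode_account :: proc(a: Account) -> ([]byte, cbor.Marshal_Error) {
	return cbor.marshal(a)
}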

+ 381 - 0
core/encoding/cbor/tags.odin

@@ -0,0 +1,381 @@
+package encoding_cbor
+
+import "base:runtime"
+
+import "core:encoding/base64"
+import "core:io"
+import "core:math"
+import "core:math/big"
+import "core:mem"
+import "core:reflect"
+import "core:strings"
+import "core:time"
+
+// Tags defined in RFC 7049 that we provide implementations for.
+
+// UTC time in seconds, unmarshalled into a `core:time` `time.Time` or integer.
+// Use the struct tag `cbor_tag:"1"` or `cbor_tag:"epoch"` to have your `time.Time` field en/decoded as epoch time.
+TAG_EPOCH_TIME_NR :: 1
+TAG_EPOCH_TIME_ID :: "epoch"
+
+// Using `core:math/big`, big integers are properly encoded and decoded during marshal and unmarshal.
+// These fields use this tag by default, no struct tag required.
+TAG_UNSIGNED_BIG_NR :: 2
+// Using `core:math/big`, big integers are properly encoded and decoded during marshal and unmarshal.
+// These fields use this tag by default, no struct tag required.
+TAG_NEGATIVE_BIG_NR :: 3
+
+// TAG_DECIMAL_FRACTION :: 4  // NOTE: We could probably implement this with `math/fixed`.
+
+// Sometimes it is beneficial to carry an embedded CBOR data item that is not meant to be decoded
+// immediately at the time the enclosing data item is being decoded. Tag number 24 (CBOR data item)
+// can be used to tag the embedded byte string as a single data item encoded in CBOR format.
+// Use the struct tag `cbor_tag:"24"` or `cbor_tag:"cbor"` to keep a non-decoded field (string or bytes) of raw CBOR.
+TAG_CBOR_NR :: 24
+TAG_CBOR_ID :: "cbor"
+
+// The contents of this tag are base64 encoded during marshal and decoded during unmarshal.
+// Use the struct tag `cbor_tag:"34"` or `cbor_tag:"base64"` to have your string or bytes field en/decoded as base64.
+TAG_BASE64_NR :: 34
+TAG_BASE64_ID :: "base64"
+
+// A tag that is used to detect that the contents of a binary buffer (like a file) are CBOR.
+// This tag wraps everything else; decoders can then check for this header and see if the
+// given content is definitely CBOR.
+// Added by the encoder if it has the flag `.Self_Described_CBOR`, decoded by default.
+TAG_SELF_DESCRIBED_CBOR :: 55799
+
+// A tag that is used to assign a textual type to the object following it.
+// The tag's value must be an array of 2 items, where the first is text (describing the following type)
+// and the second is any valid CBOR value.
+//
+// See the registration: https://datatracker.ietf.org/doc/draft-rundgren-cotx/05/
+//
+// We use this in Odin to marshal and unmarshal unions.
+TAG_OBJECT_TYPE :: 1010
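The tags above are opt-in per field through the `cbor_tag` struct tag. A small sketch of a struct using them; the `Session` type and its fields are made up for illustration.

package cbor_field_tag_sketch

import "core:time"

// Hypothetical struct:
// `cbor_tag:"epoch"`  (tag 1)  stores the time.Time as epoch seconds,
// `cbor_tag:"base64"` (tag 34) stores the bytes as a base64-encoded text string,
// `cbor_tag:"cbor"`   (tag 24) keeps the field as an embedded, undecoded CBOR item.
Session :: struct {
	started: time.Time `cbor_tag:"epoch"`,
	token:   []byte    `cbor_tag:"base64"`,
	raw:     []byte    `cbor_tag:"cbor"`,
}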
+
+// A tag implementation that handles marshals and unmarshals for the tag it is registered on.
+Tag_Implementation :: struct {
+	data:      rawptr,
+	unmarshal: Tag_Unmarshal_Proc,
+	marshal:   Tag_Marshal_Proc,
+}
+
+// Procedure responsible for unmarshalling the tag out of the reader into the given `any`.
+Tag_Unmarshal_Proc :: #type proc(self: ^Tag_Implementation, d: Decoder, tag_nr: Tag_Number, v: any) -> Unmarshal_Error
+
+// Procedure responsible for marshalling the tag in the given `any` into the given encoder.
+Tag_Marshal_Proc   :: #type proc(self: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error
+
+// When encountering a tag in the CBOR being unmarshalled, the implementation is used to unmarshal it.
+// When encountering a struct tag like `cbor_tag:"Tag_Number"`, the implementation is used to marshal it. 
+_tag_implementations_nr: map[Tag_Number]Tag_Implementation
+
+// Same as the number implementations but friendlier to use as a struct tag.
+// Instead of `cbor_tag:"34"` you can use `cbor_tag:"base64"`.
+_tag_implementations_id: map[string]Tag_Implementation
+
+// Tag implementations that are always used for a type; if that type is encountered during marshal,
+// the implementation is relied on to marshal it.
+//
+// This is good for types that don't make sense to marshal, or can't be marshalled, in their default form.
+_tag_implementations_type: map[typeid]Tag_Implementation
+
+// Register a custom tag implementation to be used when marshalling that type and unmarshalling that tag number.
+tag_register_type :: proc(impl: Tag_Implementation, nr: Tag_Number, type: typeid) {
+	_tag_implementations_nr[nr] = impl
+	_tag_implementations_type[type] = impl
+}
+
+// Register a custom tag implementation to be used when unmarshalling that tag number or marshalling
+// a field with the struct tag `cbor_tag:"nr"`.
+tag_register_number :: proc(impl: Tag_Implementation, nr: Tag_Number, id: string) {
+	_tag_implementations_nr[nr] = impl
+	_tag_implementations_id[id] = impl
+}
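For tags the package does not know about, these two procedures hook a user-provided `Tag_Implementation` into marshal and unmarshal. The following is a sketch under stated assumptions: the tag number, the `My_Id` type, and the stub bodies are made up, and a real implementation would write and read the tag header plus payload the way the built-in implementations further down do.

package cbor_register_sketch

import "core:encoding/cbor"

// Hypothetical application type and a made-up tag number.
My_Id :: distinct u64
MY_ID_TAG_NR :: 40000

my_id_marshal :: proc(_: ^cbor.Tag_Implementation, e: cbor.Encoder, v: any) -> cbor.Marshal_Error {
	// A real implementation writes the tag header followed by the payload of `v` here.
	return nil
}

my_id_unmarshal :: proc(_: ^cbor.Tag_Implementation, d: cbor.Decoder, nr: cbor.Tag_Number, v: any) -> cbor.Unmarshal_Error {
	// A real implementation reads the payload back into `v` here.
	return nil
}

// After this, marshalling a `My_Id` always goes through the implementation,
// and tag 40000 maps back to it during unmarshal.
register_my_id :: proc() {
	cbor.tag_register_type({nil, my_id_unmarshal, my_id_marshal}, MY_ID_TAG_NR, My_Id)
}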
+
+// Controls initialization of default tag implementations.
+// JS and WASI default to a panic allocator so we don't want to do it on those.
+INITIALIZE_DEFAULT_TAGS :: #config(CBOR_INITIALIZE_DEFAULT_TAGS, !ODIN_DEFAULT_TO_PANIC_ALLOCATOR && !ODIN_DEFAULT_TO_NIL_ALLOCATOR)
+
+@(private, init, disabled=!INITIALIZE_DEFAULT_TAGS)
+tags_initialize_defaults :: proc() {
+	tags_register_defaults()
+}
+
+// Registers tags that have implementations provided by this package.
+// This is done by default and can be controlled with the `CBOR_INITIALIZE_DEFAULT_TAGS` define.
+tags_register_defaults :: proc() {
+	tag_register_number({nil, tag_time_unmarshal,   tag_time_marshal},   TAG_EPOCH_TIME_NR, TAG_EPOCH_TIME_ID)
+	tag_register_number({nil, tag_base64_unmarshal, tag_base64_marshal}, TAG_BASE64_NR,     TAG_BASE64_ID)
+	tag_register_number({nil, tag_cbor_unmarshal,   tag_cbor_marshal},   TAG_CBOR_NR,       TAG_CBOR_ID)
+
+	// The following tags are registered at the type level and don't require an opt-in struct tag.
+	// Either encoding these types without the tag makes no sense, or no data is lost by always encoding them with it.
+	
+	// En/Decoding of `big.Int` fields by default.
+	tag_register_type({nil, tag_big_unmarshal, tag_big_marshal}, TAG_UNSIGNED_BIG_NR, big.Int)
+	tag_register_type({nil, tag_big_unmarshal, tag_big_marshal}, TAG_NEGATIVE_BIG_NR, big.Int)
+}
+
+// Tag number 1 contains a numerical value counting the number of seconds from 1970-01-01T00:00Z
+// in UTC time to the represented point in civil time.
+//
+// See RFC 8949 section 3.4.2.
+@(private)
+tag_time_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr := _decode_header(d.reader) or_return
+	#partial switch hdr {
+	case .U8, .U16, .U32, .U64, .Neg_U8, .Neg_U16, .Neg_U32, .Neg_U64:
+		switch &dst in v {
+		case time.Time:
+			i: i64
+			_unmarshal_any_ptr(d, &i, hdr) or_return
+			dst = time.unix(i64(i), 0)
+			return
+		case:
+			return _unmarshal_value(d, v, hdr)
+		}
+
+	case .F16, .F32, .F64:
+		switch &dst in v {
+		case time.Time:
+			f: f64
+			_unmarshal_any_ptr(d, &f, hdr) or_return
+			whole, fract := math.modf(f)
+			dst = time.unix(i64(whole), i64(fract * 1e9))
+			return
+		case:
+			return _unmarshal_value(d, v, hdr)
+		}
+
+	case:
+		maj, add := _header_split(hdr)
+		if maj == .Other {
+			i := _decode_tiny_u8(add) or_return
+
+			switch &dst in v {
+			case time.Time:
+				dst = time.unix(i64(i), 0)
+				return
+			case:
+				if _assign_int(v, i) { return }
+			}
+		}
+
+		// Only numbers and floats are allowed in this tag.
+		return .Bad_Tag_Value
+	}
+
+	return _unsupported(v, hdr)
+}
+
+@(private)
+tag_time_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	switch vv in v {
+	case time.Time:
+		// NOTE: we lose precision here, which is one of the reasons for this tag being opt-in.
+		i := time.time_to_unix(vv)
+
+		_encode_u8(e.writer, TAG_EPOCH_TIME_NR, .Tag) or_return
+		return err_conv(_encode_uint(e, _int_to_uint(i)))
+	case:
+		unreachable()
+	}
+}
+
+@(private)
+tag_big_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, tnr: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr := _decode_header(d.reader) or_return
+	maj, add := _header_split(hdr)
+	if maj != .Bytes {
+		// Only bytes are supported in this tag.
+		return .Bad_Tag_Value
+	}
+
+	switch &dst in v {
+	case big.Int:
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+		defer delete(bytes)
+
+		if err := big.int_from_bytes_big(&dst, bytes); err != nil {
+			return .Bad_Tag_Value
+		}
+
+		if tnr ==  TAG_NEGATIVE_BIG_NR {
+			dst.sign = .Negative
+		}
+
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+@(private)
+tag_big_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	switch &vv in v {
+	case big.Int:
+		if !big.int_is_initialized(&vv) {
+			_encode_u8(e.writer, TAG_UNSIGNED_BIG_NR, .Tag) or_return
+			return _encode_u8(e.writer, 0, .Bytes)
+		}
+
+		// NOTE: using the panic_allocator because all procedures should only allocate if the Int
+		// is uninitialized (which we checked).
+
+		is_neg, err := big.is_negative(&vv, mem.panic_allocator())
+		assert(err == nil, "should only error if not initialized, which has been checked")
+		
+		tnr: u8 = TAG_NEGATIVE_BIG_NR if is_neg else TAG_UNSIGNED_BIG_NR
+		_encode_u8(e.writer, tnr, .Tag) or_return
+
+		size_in_bytes, berr := big.int_to_bytes_size(&vv, false, mem.panic_allocator())
+		assert(berr == nil, "should only error if not initialized, which has been checked")
+		assert(size_in_bytes >= 0)
+
+		err_conv(_encode_u64(e, u64(size_in_bytes), .Bytes)) or_return
+
+		for offset := (size_in_bytes*8)-8; offset >= 0; offset -= 8 {
+			bits, derr := big.int_bitfield_extract(&vv, offset, 8, mem.panic_allocator())
+			assert(derr == nil, "should only error if not initialized or invalid argument (offset and count), which won't happen")
+
+			io.write_full(e.writer, {u8(bits & 255)}) or_return
+		}
+		return nil
+
+	case: unreachable()
+	}
+}
+
+@(private)
+tag_cbor_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> Unmarshal_Error {
+	hdr := _decode_header(d.reader) or_return
+	major, add := _header_split(hdr)
+	#partial switch major {
+	case .Bytes:
+		ti := reflect.type_info_base(type_info_of(v.id))
+		return _unmarshal_bytes(d, v, ti, hdr, add)
+		
+	case: return .Bad_Tag_Value
+	}
+}
+
+@(private)
+tag_cbor_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	_encode_u8(e.writer, TAG_CBOR_NR, .Tag) or_return
+	ti := runtime.type_info_base(type_info_of(v.id))
+	#partial switch t in ti.variant {
+	case runtime.Type_Info_String:
+		return marshal_into(e, v)
+	case runtime.Type_Info_Array:
+		elem_base := reflect.type_info_base(t.elem)
+		if elem_base.id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case runtime.Type_Info_Slice:
+		elem_base := reflect.type_info_base(t.elem)
+		if elem_base.id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case runtime.Type_Info_Dynamic_Array:
+		elem_base := reflect.type_info_base(t.elem)
+		if elem_base.id != byte { return .Bad_Tag_Value }
+		return marshal_into(e, v)
+	case:
+		return .Bad_Tag_Value
+	}
+}
+
+@(private)
+tag_base64_unmarshal :: proc(_: ^Tag_Implementation, d: Decoder, _: Tag_Number, v: any) -> (err: Unmarshal_Error) {
+	hdr        := _decode_header(d.reader) or_return
+	major, add := _header_split(hdr)
+	ti         := reflect.type_info_base(type_info_of(v.id))
+
+	if major != .Text && major != .Bytes {
+		return .Bad_Tag_Value
+	}
+
+	bytes := string(err_conv(_decode_bytes(d, add, allocator=context.temp_allocator)) or_return)
+	defer delete(bytes, context.temp_allocator)
+
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+
+		if t.is_cstring {
+			length  := base64.decoded_len(bytes)
+			builder := strings.builder_make(0, length+1)
+			base64.decode_into(strings.to_stream(&builder), bytes) or_return
+
+			raw  := (^cstring)(v.data)
+			raw^  = cstring(raw_data(builder.buf))
+		} else {
+			raw  := (^string)(v.data)
+			raw^  = string(base64.decode(bytes) or_return)
+		}
+
+		return
+
+	case reflect.Type_Info_Slice:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		raw  := (^[]byte)(v.data)
+		raw^  = base64.decode(bytes) or_return
+		return
+		
+	case reflect.Type_Info_Dynamic_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		decoded := base64.decode(bytes) or_return
+		
+		raw           := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data       = raw_data(decoded)
+		raw.len        = len(decoded)
+		raw.cap        = len(decoded)
+		raw.allocator  = context.allocator
+		return
+
+	case reflect.Type_Info_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		if base64.decoded_len(bytes) > t.count { return _unsupported(v, hdr) }
+		
+		slice := ([^]byte)(v.data)[:len(bytes)]
+		copy(slice, base64.decode(bytes) or_return)
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+@(private)
+tag_base64_marshal :: proc(_: ^Tag_Implementation, e: Encoder, v: any) -> Marshal_Error {
+	_encode_u8(e.writer, TAG_BASE64_NR, .Tag) or_return
+
+	ti := runtime.type_info_base(type_info_of(v.id))
+	a := any{v.data, ti.id}
+
+	bytes: []byte
+	switch val in a {
+	case string:        bytes = transmute([]byte)val
+	case cstring:       bytes = transmute([]byte)string(val)
+	case []byte:        bytes = val
+	case [dynamic]byte: bytes = val[:]
+	case:
+		#partial switch t in ti.variant {
+		case runtime.Type_Info_Array:
+			if t.elem.id != byte { return .Bad_Tag_Value }
+			bytes = ([^]byte)(v.data)[:t.count]
+		case:
+			return .Bad_Tag_Value
+		}
+	}
+
+	out_len := base64.encoded_len(bytes)
+	err_conv(_encode_u64(e, u64(out_len), .Text)) or_return
+	return base64.encode_into(e.writer, bytes)
+}

+ 932 - 0
core/encoding/cbor/unmarshal.odin

@@ -0,0 +1,932 @@
+package encoding_cbor
+
+import "base:intrinsics"
+import "base:runtime"
+
+import "core:io"
+import "core:mem"
+import "core:reflect"
+import "core:strings"
+import "core:unicode/utf8"
+
+/*
+Unmarshals the given CBOR into the given pointer using reflection.
+Types that require allocation are allocated using the given allocator.
+
+Some temporary allocations are done on the given `temp_allocator`; if you want, this can be set to
+a "normal" allocator, because the necessary `delete` and `free` calls are still made.
+This is helpful when the CBOR is so big that you don't want to keep all the temporary allocations alive until the end.
+
+Disable streaming/indeterminate lengths with the `.Disallow_Streaming` flag.
+
+Shrink excess bytes in buffers and containers with the `.Shrink_Excess` flag.
+
+Mark the input as trusted input with the `.Trusted_Input` flag; this turns off the safety feature
+of not pre-allocating more than `max_pre_alloc` bytes before reading into them. You should only
+do this when you own both sides of the encoding and are sure there can't be malicious bytes used as
+an input.
+*/
+unmarshal :: proc {
+	unmarshal_from_reader,
+	unmarshal_from_string,
+}
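A short usage sketch for the flags described above; the `Config` type and `load_config` are made up, and the flag names are the ones documented in the comment above.

package cbor_unmarshal_sketch

import "core:encoding/cbor"

Config :: struct {
	name:    string,
	retries: int,
}

// `data` is assumed to already hold CBOR (for example produced by this package's
// encoder); indeterminate lengths are rejected and buffers are shrunk to fit.
load_config :: proc(data: []byte) -> (cfg: Config, err: cbor.Unmarshal_Error) {
	err = cbor.unmarshal(string(data), &cfg, cbor.Decoder_Flags{.Disallow_Streaming, .Shrink_Excess})
	return
}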
+
+unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator)
+
+	// Normal EOF does not exist here; we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+// Unmarshals from a string; see the docs on the proc group `unmarshal` for more info.
+unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	sr: strings.Reader
+	r := strings.to_reader(&sr, s)
+
+	err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator)
+
+	// Normal EOF does not exist here; we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+}
+
+unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
+	d := d
+
+	err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator)
+
+	// Normal EOF does not exist here; we try to read the exact amount that is said to be provided.
+	if err == .EOF { err = .Unexpected_EOF }
+	return
+
+}
+
+_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator) -> Unmarshal_Error {
+	context.allocator = allocator
+	context.temp_allocator = temp_allocator
+	v := v
+
+	if v == nil || v.id == nil {
+		return .Invalid_Parameter
+	}
+
+	v = reflect.any_base(v)
+	ti := type_info_of(v.id)
+	if !reflect.is_pointer(ti) || ti.id == rawptr {
+		return .Non_Pointer_Parameter
+	}
+	
+	data := any{(^rawptr)(v.data)^, ti.variant.(reflect.Type_Info_Pointer).elem.id}	
+	return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return))
+}
+
+_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Error) {
+	v := v
+	ti := reflect.type_info_base(type_info_of(v.id))
+	r := d.reader
+
+	// If it's a union with only one variant, then treat it as that variant
+	if u, ok := ti.variant.(reflect.Type_Info_Union); ok && len(u.variants) == 1 {
+		#partial switch hdr {
+		case .Nil, .Undefined, nil: // no-op.
+		case:
+			variant := u.variants[0]
+			v.id = variant.id
+			ti = reflect.type_info_base(variant)
+			if !reflect.is_pointer_internally(variant) {
+				tag := any{rawptr(uintptr(v.data) + u.tag_offset), u.tag_type.id}
+				assert(_assign_int(tag, 1))
+			}
+		}
+	}
+
+	// Allow generic unmarshal by doing it into a `Value`.
+	switch &dst in v {
+	case Value:
+		dst = err_conv(_decode_from_decoder(d, hdr)) or_return
+		return
+	}
+
+	switch hdr {
+	case .U8:
+		decoded := _decode_u8(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U16:
+		decoded := _decode_u16(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U32:
+		decoded := _decode_u32(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .U64:
+		decoded := _decode_u64(r) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U8:
+		decoded := Negative_U8(_decode_u8(r) or_return)
+		
+		switch &dst in v {
+		case Negative_U8:
+			dst = decoded
+			return
+		case Negative_U16:
+			dst = Negative_U16(decoded)
+			return
+		case Negative_U32:
+			dst = Negative_U32(decoded)
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U16:
+		decoded := Negative_U16(_decode_u16(r) or_return)
+		
+		switch &dst in v {
+		case Negative_U16:
+			dst = decoded
+			return
+		case Negative_U32:
+			dst = Negative_U32(decoded)
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U32:
+		decoded := Negative_U32(_decode_u32(r) or_return)
+		
+		switch &dst in v {
+		case Negative_U32:
+			dst = decoded
+			return
+		case Negative_U64:
+			dst = Negative_U64(decoded)
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Neg_U64:
+		decoded := Negative_U64(_decode_u64(r) or_return)
+		
+		switch &dst in v {
+		case Negative_U64:
+			dst = decoded
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr) }
+		return
+
+	case .Simple:
+		decoded := _decode_simple(r) or_return
+
+		// NOTE: Because this is a special type and not to be treated as a general integer,
+		// We only put the value of it in fields that are explicitly of type `Simple`.
+		switch &dst in v {
+		case Simple:
+			dst = decoded
+			return
+		case:
+			return _unsupported(v, hdr)
+		}
+
+	case .F16:
+		decoded := _decode_f16(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .F32:
+		decoded := _decode_f32(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .F64:
+		decoded := _decode_f64(r) or_return
+		if !_assign_float(v, decoded) { return _unsupported(v, hdr) }
+		return
+
+	case .True:
+		if !_assign_bool(v, true) { return _unsupported(v, hdr) }
+		return
+
+	case .False:
+		if !_assign_bool(v, false) { return _unsupported(v, hdr) }
+		return
+	
+	case .Nil, .Undefined:
+		mem.zero(v.data, ti.size)
+		return
+
+	case .Break:
+		return .Break
+	}
+	
+	maj, add := _header_split(hdr)
+	switch maj {
+	case .Unsigned:
+		decoded := _decode_tiny_u8(add) or_return
+		if !_assign_int(v, decoded) { return _unsupported(v, hdr, add) }
+		return
+
+	case .Negative:
+		decoded := Negative_U8(_decode_tiny_u8(add) or_return)
+
+		switch &dst in v {
+		case Negative_U8:
+			dst = decoded
+			return
+		}
+
+		if reflect.is_unsigned(ti) { return _unsupported(v, hdr, add) }
+
+		if !_assign_int(v, negative_to_int(decoded)) { return _unsupported(v, hdr, add) }
+		return
+
+	case .Other:
+		decoded := _decode_tiny_simple(add) or_return
+
+		// NOTE: Because this is a special type and not to be treated as a general integer,
+		// We only put the value of it in fields that are explicitly of type `Simple`.
+		switch &dst in v {
+		case Simple:
+			dst = decoded
+			return
+		case:
+			return _unsupported(v, hdr, add)
+		}
+
+	case .Tag:
+		switch &dst in v {
+		case ^Tag:
+			tval := err_conv(_decode_tag_ptr(d, add)) or_return
+			if t, is_tag := tval.(^Tag); is_tag {
+				dst = t
+				return
+			}
+
+			destroy(tval)
+			return .Bad_Tag_Value
+		case Tag:
+			t := err_conv(_decode_tag(d, add)) or_return
+			if t, is_tag := t.?; is_tag {
+				dst = t
+				return
+			}
+
+			return .Bad_Tag_Value
+		}
+
+		nr := err_conv(_decode_uint_as_u64(r, add)) or_return
+
+		// Custom tag implementations.
+		if impl, ok := _tag_implementations_nr[nr]; ok {
+			return impl->unmarshal(d, nr, v)
+		} else if nr == TAG_OBJECT_TYPE {
+			return _unmarshal_union(d, v, ti, hdr)
+		} else {
+			// Discard the tag info and unmarshal as its value.
+			return _unmarshal_value(d, v, _decode_header(r) or_return)
+		}
+
+		return _unsupported(v, hdr, add)
+
+	case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add)
+	case .Text:  return _unmarshal_string(d, v, ti, hdr, add)
+	case .Array: return _unmarshal_array(d, v, ti, hdr, add)
+	case .Map:   return _unmarshal_map(d, v, ti, hdr, add)
+
+	case:        return .Bad_Major
+	}
+}
+
+_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+
+		if t.is_cstring {
+			raw  := (^cstring)(v.data)
+			assert_safe_for_cstring(string(bytes))
+			raw^  = cstring(raw_data(bytes))
+		} else {
+			// String has same memory layout as a slice, so we can directly use it as a slice.
+			raw  := (^mem.Raw_String)(v.data)
+			raw^  = transmute(mem.Raw_String)bytes
+		}
+
+		return
+
+	case reflect.Type_Info_Slice:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		bytes := err_conv(_decode_bytes(d, add)) or_return
+		raw   := (^mem.Raw_Slice)(v.data)
+		raw^   = transmute(mem.Raw_Slice)bytes
+		return
+		
+	case reflect.Type_Info_Dynamic_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+		
+		bytes         := err_conv(_decode_bytes(d, add)) or_return
+		raw           := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data       = raw_data(bytes)
+		raw.len        = len(bytes)
+		raw.cap        = len(bytes)
+		raw.allocator  = context.allocator
+		return
+
+	case reflect.Type_Info_Array:
+		elem_base := reflect.type_info_base(t.elem)
+
+		if elem_base.id != byte { return _unsupported(v, hdr) }
+
+		bytes := err_conv(_decode_bytes(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(bytes, context.temp_allocator)
+
+		if len(bytes) > t.count { return _unsupported(v, hdr) }
+		
+		// Copy into array type, delete original.
+		slice := ([^]byte)(v.data)[:len(bytes)]
+		n := copy(slice, bytes)
+		assert(n == len(bytes))
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_String:
+		text := err_conv(_decode_text(d, add)) or_return
+
+		if t.is_cstring {
+			raw := (^cstring)(v.data)
+
+			assert_safe_for_cstring(text)
+			raw^ = cstring(raw_data(text))
+		} else {
+			raw := (^string)(v.data)
+			raw^ = text
+		}
+		return
+
+	// Enum by its variant name.
+	case reflect.Type_Info_Enum:
+		text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(text, context.temp_allocator)
+
+		for name, i in t.names {
+			if name == text {
+				if !_assign_int(any{v.data, ti.id}, t.values[i]) { return _unsupported(v, hdr) }
+				return
+			}
+		}
+	
+	case reflect.Type_Info_Rune:
+		text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
+		defer delete(text, context.temp_allocator)
+
+		r := (^rune)(v.data)
+		dr, n := utf8.decode_rune(text)
+		if dr == utf8.RUNE_ERROR || n < len(text) {
+			return _unsupported(v, hdr)
+		}
+
+		r^ = dr
+		return
+	}
+
+	return _unsupported(v, hdr)
+}
+
+_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	assign_array :: proc(
+		d: Decoder,
+		da: ^mem.Raw_Dynamic_Array,
+		elemt: ^reflect.Type_Info,
+		length: int,
+		growable := true,
+	) -> (out_of_space: bool, err: Unmarshal_Error) {
+		for idx: uintptr = 0; length == -1 || idx < uintptr(length); idx += 1 {
+			elem_ptr := rawptr(uintptr(da.data) + idx*uintptr(elemt.size))
+			elem     := any{elem_ptr, elemt.id}
+
+			hdr := _decode_header(d.reader) or_return
+			
+			// Double size if out of capacity.
+			if da.cap <= da.len {
+				// Not growable, error out.
+				if !growable { return true, .Out_Of_Memory }
+
+				cap := 2 * da.cap
+				ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap)
+ 				
+				// NOTE: Might be lying here, but it is at least an allocator error.
+				if !ok { return false, .Out_Of_Memory }
+			}
+			
+			err = _unmarshal_value(d, elem, hdr)
+			if length == -1 && err == .Break { break }
+			if err != nil { return }
+
+			da.len += 1
+		}
+		
+		return false, nil
+	}
+
+	// Allow generically storing the values array.
+	switch &dst in v {
+	case ^Array:
+		dst = err_conv(_decode_array_ptr(d, add)) or_return
+		return
+	case Array:
+		dst = err_conv(_decode_array(d, add)) or_return
+		return
+	}
+
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_Slice:
+		length, scap := err_conv(_decode_len_container(d, add)) or_return
+
+		data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
+		defer if err != nil { mem.free_bytes(data) }
+
+		da := mem.Raw_Dynamic_Array{raw_data(data), 0, length, context.allocator }
+
+		assign_array(d, &da, t.elem, length) or_return
+
+		if .Shrink_Excess in d.flags {
+			// Ignoring an error here, but this is not critical to succeed.
+			_ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len)
+		}
+
+		raw      := (^mem.Raw_Slice)(v.data)
+		raw.data  = da.data
+		raw.len   = da.len
+		return
+
+	case reflect.Type_Info_Dynamic_Array:
+		length, scap := err_conv(_decode_len_container(d, add)) or_return
+
+		data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
+		defer if err != nil { mem.free_bytes(data) }
+
+		raw           := (^mem.Raw_Dynamic_Array)(v.data)
+		raw.data       = raw_data(data) 
+		raw.len        = 0
+		raw.cap        = length
+		raw.allocator  = context.allocator
+
+		_ = assign_array(d, raw, t.elem, length) or_return
+
+		if .Shrink_Excess in d.flags {
+			// Ignoring an error here, but this is not critical to succeed.
+			_ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len)
+		}
+		return
+
+	case reflect.Type_Info_Array:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, t.count)
+	
+		if length > t.count {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+
+		out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case reflect.Type_Info_Enumerated_Array:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, t.count)
+	
+		if length > t.count {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
+
+		out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case reflect.Type_Info_Complex:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, 2)
+	
+		if length > 2 {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, context.allocator }
+
+		info: ^runtime.Type_Info
+		switch ti.id {
+		case complex32:  info = type_info_of(f16)
+		case complex64:  info = type_info_of(f32)
+		case complex128: info = type_info_of(f64)
+		case:            unreachable()
+		}
+
+		out_of_space := assign_array(d, &da, info, 2, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+	
+	case reflect.Type_Info_Quaternion:
+		_, scap := err_conv(_decode_len_container(d, add)) or_return
+		length := min(scap, 4)
+	
+		if length > 4 {
+			return _unsupported(v, hdr)
+		}
+
+		da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, context.allocator }
+
+		info: ^runtime.Type_Info
+		switch ti.id {
+		case quaternion64:  info = type_info_of(f16)
+		case quaternion128: info = type_info_of(f32)
+		case quaternion256: info = type_info_of(f64)
+		case:               unreachable()
+		}
+
+		out_of_space := assign_array(d, &da, info, 4, growable=false) or_return
+		if out_of_space { return _unsupported(v, hdr) }
+		return
+
+	case: return _unsupported(v, hdr)
+	}
+}
+
+_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
+	r := d.reader
+	decode_key :: proc(d: Decoder, v: any, allocator := context.allocator) -> (k: string, err: Unmarshal_Error) {
+		entry_hdr := _decode_header(d.reader) or_return
+		entry_maj, entry_add := _header_split(entry_hdr)
+		#partial switch entry_maj {
+		case .Text:
+			k = err_conv(_decode_text(d, entry_add, allocator)) or_return
+			return
+		case .Bytes:
+			bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator)) or_return
+			k = string(bytes)
+			return
+		case:
+			err = _unsupported(v, entry_hdr)
+			return
+		}
+	}
+
+	// Allow generically storing the map array.
+	switch &dst in v {
+	case ^Map:
+		dst = err_conv(_decode_map_ptr(d, add)) or_return
+		return
+	case Map:
+		dst = err_conv(_decode_map(d, add)) or_return
+		return
+	}
+
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_Struct:
+		if t.is_raw_union {
+			return _unsupported(v, hdr)
+		}
+
+		length, _ := err_conv(_decode_len_container(d, add)) or_return
+		unknown := length == -1
+		fields := reflect.struct_fields_zipped(ti.id)
+	
+		for idx := 0; idx < len(fields) && (unknown || idx < length); idx += 1 {
+			// Decode key, keys can only be strings.
+			key: string
+			if keyv, kerr := decode_key(d, v, context.temp_allocator); unknown && kerr == .Break {
+				break
+			} else if kerr != nil {
+				err = kerr
+				return
+			} else {
+				key = keyv
+			}
+			defer delete(key, context.temp_allocator)
+			
+			// Find matching field.
+			use_field_idx := -1
+			{
+				for field, field_idx in fields {
+					tag_value := string(reflect.struct_tag_get(field.tag, "cbor"))
+					if tag_value == "-" {
+						continue
+					}
+
+					if key == tag_value {
+						use_field_idx = field_idx
+						break
+					}
+
+					if key == field.name {
+						// No break because we want to still check remaining struct tags.
+						use_field_idx = field_idx
+					}
+				}
+				
+				// Skips unused map entries.
+				if use_field_idx < 0 {
+					continue
+				}
+			}
+
+			field := fields[use_field_idx]
+			// name  := field.name
+			ptr   := rawptr(uintptr(v.data) + field.offset)
+			fany  := any{ptr, field.type.id}
+			_unmarshal_value(d, fany, _decode_header(r) or_return) or_return
+		}
+		return
+
+	case reflect.Type_Info_Map:
+		if !reflect.is_string(t.key) {
+			return _unsupported(v, hdr)
+		}
+
+		raw_map := (^mem.Raw_Map)(v.data)
+		if raw_map.allocator.procedure == nil {
+			raw_map.allocator = context.allocator
+		}
+
+		defer if err != nil {
+			_ = runtime.map_free_dynamic(raw_map^, t.map_info)
+		}
+
+		length, scap := err_conv(_decode_len_container(d, add)) or_return
+		unknown := length == -1
+		if !unknown {
+			// Reserve space before setting so we can return allocation errors and be efficient on big maps.
+			new_len := uintptr(min(scap, runtime.map_len(raw_map^)+length))
+			runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
+		}
+		
+		// Temporary memory to unmarshal keys into before inserting them into the map.
+		elem_backing := mem.alloc_bytes_non_zeroed(t.value.size, t.value.align, context.temp_allocator) or_return
+		defer delete(elem_backing, context.temp_allocator)
+
+		map_backing_value := any{raw_data(elem_backing), t.value.id}
+
+		for idx := 0; unknown || idx < length; idx += 1 {
+			// Decode key, keys can only be strings.
+			key: string
+			if keyv, kerr := decode_key(d, v); unknown && kerr == .Break {
+				break
+			} else if kerr != nil {
+				err = kerr
+				return
+			} else {
+				key = keyv
+			}
+
+			if unknown || idx > scap {
+				// Reserve space for new element so we can return allocator errors.
+				new_len := uintptr(runtime.map_len(raw_map^)+1)
+				runtime.map_reserve_dynamic(raw_map, t.map_info, new_len) or_return
+			}
+
+			mem.zero_slice(elem_backing)
+			_unmarshal_value(d, map_backing_value, _decode_header(r) or_return) or_return
+
+			key_ptr := rawptr(&key)
+			key_cstr: cstring
+			if reflect.is_cstring(t.key) {
+				assert_safe_for_cstring(key)
+				key_cstr = cstring(raw_data(key))
+				key_ptr = &key_cstr
+			}
+
+			set_ptr := runtime.__dynamic_map_set_without_hash(raw_map, t.map_info, key_ptr, map_backing_value.data)
+			// We already reserved space for it, so this shouldn't fail.
+			assert(set_ptr != nil)
+		}
+	
+		if .Shrink_Excess in d.flags {
+			_, _ = runtime.map_shrink_dynamic(raw_map, t.map_info)
+		}
+		return
+
+	case:
+		return _unsupported(v, hdr)
+	}
+}
+
+// Unmarshal into a union, based on the `TAG_OBJECT_TYPE` tag of the spec, it denotes a tag which
+// contains an array of exactly two elements, the first is a textual representation of the following
+// CBOR value's type.
+_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header) -> (err: Unmarshal_Error) {
+	r := d.reader
+	#partial switch t in ti.variant {
+	case reflect.Type_Info_Union:
+		idhdr: Header
+		target_name: string
+		{
+			vhdr := _decode_header(r) or_return
+			vmaj, vadd := _header_split(vhdr)
+			if vmaj != .Array {
+				return .Bad_Tag_Value
+			}
+
+			n_items, _ := err_conv(_decode_len_container(d, vadd)) or_return
+			if n_items != 2 {
+				return .Bad_Tag_Value
+			}
+			
+			idhdr = _decode_header(r) or_return
+			idmaj, idadd := _header_split(idhdr)
+			if idmaj != .Text {
+				return .Bad_Tag_Value
+			}
+
+			target_name = err_conv(_decode_text(d, idadd, context.temp_allocator)) or_return
+		}
+		defer delete(target_name, context.temp_allocator)
+
+		for variant, i in t.variants {
+			tag := i64(i)
+			if !t.no_nil {
+				tag += 1
+			}
+
+			#partial switch vti in variant.variant {
+			case reflect.Type_Info_Named:
+				if vti.name == target_name {
+					reflect.set_union_variant_raw_tag(v, tag)
+					return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+				}
+
+			case:
+				builder := strings.builder_make(context.temp_allocator)
+				defer strings.builder_destroy(&builder)
+
+				reflect.write_type(&builder, variant)
+				variant_name := strings.to_string(builder)
+				
+				if variant_name == target_name {
+					reflect.set_union_variant_raw_tag(v, tag)
+					return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
+				}
+			}
+		}
+
+		// No variant matched.
+		return _unsupported(v, idhdr)
+
+	case:
+		// Not a union.
+		return _unsupported(v, hdr)
+	}
+}
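To make the tag 1010 round trip concrete, here is a sketch of a multi-variant union going through marshal and back; `cbor.marshal` and its `([]byte, Marshal_Error)` return are assumptions based on marshal.odin earlier in this diff, and the `Shape` types are made up.

package cbor_union_sketch

import "core:encoding/cbor"

Circle :: struct { radius: f32 }
Rect   :: struct { w, h: f32 }

Shape :: union {
	Circle,
	Rect,
}

// Marshal writes tag 1010 wrapping ["Circle", {...}]; unmarshal matches the
// textual variant name back to the union variant, as _unmarshal_union does above.
roundtrip :: proc(s: Shape) -> (out: Shape, ok: bool) {
	data, merr := cbor.marshal(s) // assumed package-level entry point
	if merr != nil { return }
	defer delete(data)
	if cbor.unmarshal(string(data), &out) != nil { return }
	return out, true
}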
+
+_assign_int :: proc(val: any, i: $T) -> bool {
+	v := reflect.any_core(val)
+
+	// NOTE: should under/over flow be checked here? `encoding/json` doesn't, but maybe that is a
+	// less strict encoding?
+
+	switch &dst in v {
+	case i8:      dst = i8     (i)
+	case i16:     dst = i16    (i)
+	case i16le:   dst = i16le  (i)
+	case i16be:   dst = i16be  (i)
+	case i32:     dst = i32    (i)
+	case i32le:   dst = i32le  (i)
+	case i32be:   dst = i32be  (i)
+	case i64:     dst = i64    (i)
+	case i64le:   dst = i64le  (i)
+	case i64be:   dst = i64be  (i)
+	case i128:    dst = i128   (i)
+	case i128le:  dst = i128le (i)
+	case i128be:  dst = i128be (i)
+	case u8:      dst = u8     (i)
+	case u16:     dst = u16    (i)
+	case u16le:   dst = u16le  (i)
+	case u16be:   dst = u16be  (i)
+	case u32:     dst = u32    (i)
+	case u32le:   dst = u32le  (i)
+	case u32be:   dst = u32be  (i)
+	case u64:     dst = u64    (i)
+	case u64le:   dst = u64le  (i)
+	case u64be:   dst = u64be  (i)
+	case u128:    dst = u128   (i)
+	case u128le:  dst = u128le (i)
+	case u128be:  dst = u128be (i)
+	case int:     dst = int    (i)
+	case uint:    dst = uint   (i)
+	case uintptr: dst = uintptr(i)
+	case:
+		ti := type_info_of(v.id)
+		if _, ok := ti.variant.(runtime.Type_Info_Bit_Set); ok {
+			do_byte_swap := !reflect.bit_set_is_big_endian(v)
+			switch ti.size * 8 {
+			case 0: // no-op.
+			case 8:
+				x := (^u8)(v.data)
+				x^ = u8(i)
+			case 16:
+				x := (^u16)(v.data)
+				x^ = do_byte_swap ? intrinsics.byte_swap(u16(i)) : u16(i)
+			case 32:
+				x := (^u32)(v.data)
+				x^ = do_byte_swap ? intrinsics.byte_swap(u32(i)) : u32(i)
+			case 64:
+				x := (^u64)(v.data)
+				x^ = do_byte_swap ? intrinsics.byte_swap(u64(i)) : u64(i)
+			case:
+				panic("unknown bit_set size")
+			}
+			return true
+		}
+		return false
+	}
+	return true
+}
+
+_assign_float :: proc(val: any, f: $T) -> bool {
+	v := reflect.any_core(val)
+
+	// NOTE: should under/over flow be checked here? `encoding/json` doesn't, but maybe that is a
+	// less strict encoding?
+
+	switch &dst in v {
+	case f16:     dst = f16  (f)
+	case f16le:   dst = f16le(f)
+	case f16be:   dst = f16be(f)
+	case f32:     dst = f32  (f)
+	case f32le:   dst = f32le(f)
+	case f32be:   dst = f32be(f)
+	case f64:     dst = f64  (f)
+	case f64le:   dst = f64le(f)
+	case f64be:   dst = f64be(f)
+	
+	case complex32:  dst = complex(f16(f), 0)
+	case complex64:  dst = complex(f32(f), 0)
+	case complex128: dst = complex(f64(f), 0)
+	
+	case quaternion64:  dst = quaternion(w=f16(f), x=0, y=0, z=0)
+	case quaternion128: dst = quaternion(w=f32(f), x=0, y=0, z=0)
+	case quaternion256: dst = quaternion(w=f64(f), x=0, y=0, z=0)
+	
+	case: return false
+	}
+	return true
+}
+
+_assign_bool :: proc(val: any, b: bool) -> bool {
+	v := reflect.any_core(val)
+	switch &dst in v {
+	case bool: dst = bool(b)
+	case b8:   dst = b8  (b)
+	case b16:  dst = b16 (b)
+	case b32:  dst = b32 (b)
+	case b64:  dst = b64 (b)
+	case: return false
+	}
+	return true
+}
+
+// Sanity check that the decoder added a nil byte to the end.
+@(private, disabled=ODIN_DISABLE_ASSERT)
+assert_safe_for_cstring :: proc(s: string, loc := #caller_location) {
+	assert(([^]byte)(raw_data(s))[len(s)] == 0, loc = loc)
+}

+ 20 - 6
core/encoding/csv/reader.odin

@@ -1,6 +1,6 @@
 // package csv reads and writes comma-separated values (CSV) files.
 // This package supports the format described in RFC 4180 <https://tools.ietf.org/html/rfc4180.html>
-package csv
+package encoding_csv
 
 import "core:bufio"
 import "core:bytes"
@@ -91,7 +91,10 @@ DEFAULT_RECORD_BUFFER_CAPACITY :: 256
 
 // reader_init initializes a new Reader from r
 reader_init :: proc(reader: ^Reader, r: io.Reader, buffer_allocator := context.allocator) {
-	reader.comma = ','
+	switch reader.comma {
+	case '\x00', '\n', '\r', 0xfffd:
+		reader.comma = ','
+	}
 
 	context.allocator = buffer_allocator
 	reserve(&reader.record_buffer, DEFAULT_RECORD_BUFFER_CAPACITY)
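With this change a delimiter set before `reader_init` is kept instead of being overwritten; only a zero or otherwise invalid comma falls back to `','`. A small sketch of that use (the `;`-separated input and the proc name are made up):

package csv_delim_sketch

import "core:encoding/csv"
import "core:strings"

// Read ';'-separated records: the pre-set comma survives reader_init.
read_semicolons :: proc(input: string, allocator := context.allocator) -> ([][]string, csv.Error) {
	sr: strings.Reader
	r: csv.Reader
	r.comma = ';'
	csv.reader_init(&r, strings.to_reader(&sr, input))
	defer csv.reader_destroy(&r)
	return csv.read_all(&r, allocator)
}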
@@ -121,6 +124,7 @@ reader_destroy :: proc(r: ^Reader) {
 // read reads a single record (a slice of fields) from r
 //
 // All \r\n sequences are normalized to \n, including in multi-line fields
+@(require_results)
 read :: proc(r: ^Reader, allocator := context.allocator) -> (record: []string, err: Error) {
 	if r.reuse_record {
 		record, err = _read_record(r, &r.last_record, allocator)
@@ -133,6 +137,7 @@ read :: proc(r: ^Reader, allocator := context.allocator) -> (record: []string, e
 }
 
 // is_io_error checks whether an Error is a specific io.Error kind
+@(require_results)
 is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
 	if v, ok := err.(io.Error); ok {
 		return v == io_err
@@ -140,10 +145,10 @@ is_io_error :: proc(err: Error, io_err: io.Error) -> bool {
 	return false
 }
 
-
 // read_all reads all the remaining records from r.
 // Each record is a slice of fields.
 // read_all is defined to read until an EOF, and does not treat EOF as an error
+@(require_results)
 read_all :: proc(r: ^Reader, allocator := context.allocator) -> ([][]string, Error) {
 	context.allocator = allocator
 	records: [dynamic][]string
@@ -153,13 +158,18 @@ read_all :: proc(r: ^Reader, allocator := context.allocator) -> ([][]string, Err
 			return records[:], nil
 		}
 		if rerr != nil {
-			return nil, rerr
+			// allow for a partial read
+			if record != nil {
+				append(&records, record)
+			}
+			return records[:], rerr
 		}
 		append(&records, record)
 	}
 }
 
 // read_from_string reads a single record (a slice of fields) from the provided input.
+@(require_results)
 read_from_string :: proc(input: string, record_allocator := context.allocator, buffer_allocator := context.allocator) -> (record: []string, n: int, err: Error) {
 	ir: strings.Reader
 	strings.reader_init(&ir, input)
@@ -175,6 +185,7 @@ read_from_string :: proc(input: string, record_allocator := context.allocator, b
 
 
 // read_all_from_string reads all the remaining records from the provided input.
+@(require_results)
 read_all_from_string :: proc(input: string, records_allocator := context.allocator, buffer_allocator := context.allocator) -> ([][]string, Error) {
 	ir: strings.Reader
 	strings.reader_init(&ir, input)
@@ -186,7 +197,7 @@ read_all_from_string :: proc(input: string, records_allocator := context.allocat
 	return read_all(&r, records_allocator)
 }
 
-@private
+@(private, require_results)
 is_valid_delim :: proc(r: rune) -> bool {
 	switch r {
 	case 0, '"', '\r', '\n', utf8.RUNE_ERROR:
@@ -195,8 +206,9 @@ is_valid_delim :: proc(r: rune) -> bool {
 	return utf8.valid_rune(r)
 }
 
-@private
+@(private, require_results)
 _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.allocator) -> ([]string, Error) {
+	@(require_results)
 	read_line :: proc(r: ^Reader) -> ([]byte, io.Error) {
 		if !r.multiline_fields {
 			line, err := bufio.reader_read_slice(&r.r, '\n')
@@ -266,6 +278,7 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
 		unreachable()
 	}
 
+	@(require_results)
 	length_newline :: proc(b: []byte) -> int {
 		if len(b) > 0 && b[len(b)-1] == '\n' {
 			return 1
@@ -273,6 +286,7 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
 		return 0
 	}
 
+	@(require_results)
 	next_rune :: proc(b: []byte) -> rune {
 		r, _ := utf8.decode_rune(b)
 		return r

+ 5 - 2
core/encoding/csv/writer.odin

@@ -1,4 +1,4 @@
-package csv
+package encoding_csv
 
 import "core:io"
 import "core:strings"
@@ -17,7 +17,10 @@ Writer :: struct {
 
 // writer_init initializes a Writer that writes to w
 writer_init :: proc(writer: ^Writer, w: io.Writer) {
-	writer.comma = ','
+	switch writer.comma {
+	case '\x00', '\n', '\r', 0xfffd:
+		writer.comma = ','
+	}
 	writer.w = w
 }
 

+ 1 - 1
core/encoding/entity/entity.odin

@@ -1,4 +1,4 @@
-package unicode_entity
+package encoding_unicode_entity
 /*
 	A unicode entity encoder/decoder
 

+ 1 - 1
core/encoding/entity/generated.odin

@@ -1,4 +1,4 @@
-package unicode_entity
+package encoding_unicode_entity
 
 /*
 	------ GENERATED ------ DO NOT EDIT ------ GENERATED ------ DO NOT EDIT ------ GENERATED ------

+ 1 - 1
core/encoding/hex/hex.odin

@@ -1,4 +1,4 @@
-package hex
+package encoding_hex
 
 import "core:strings"
 

+ 2 - 2
core/encoding/json/marshal.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:mem"
 import "core:math/bits"
@@ -420,7 +420,7 @@ marshal_to_writer :: proc(w: io.Writer, v: any, opt: ^Marshal_Options) -> (err:
 				data := rawptr(uintptr(v.data) + info.offsets[i])
 				the_value := any{data, id}
 
-				if is_omitempty(the_value) {
+				if omitempty && is_omitempty(the_value) {
 					continue
 				}
 

+ 1 - 1
core/encoding/json/parser.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:mem"
 import "core:unicode/utf8"

+ 1 - 1
core/encoding/json/tokenizer.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:unicode/utf8"
 

+ 1 - 1
core/encoding/json/types.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:strings"
 

+ 2 - 2
core/encoding/json/unmarshal.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:mem"
 import "core:math"
@@ -348,7 +348,7 @@ json_name_from_tag_value :: proc(value: string) -> (json_name, extra: string) {
 	json_name = value
 	if comma_index := strings.index_byte(json_name, ','); comma_index >= 0 {
 		json_name = json_name[:comma_index]
-		extra = json_name[comma_index:]
+		extra = value[1 + comma_index:]
 	}
 	return
 }

+ 1 - 1
core/encoding/json/validator.odin

@@ -1,4 +1,4 @@
-package json
+package encoding_json
 
 import "core:mem"
 

+ 1 - 1
core/encoding/varint/doc.odin

@@ -25,4 +25,4 @@
 	```
 
 */
-package varint
+package encoding_varint

+ 1 - 1
core/encoding/varint/leb128.odin

@@ -8,7 +8,7 @@
 
 // package varint implements variable length integer encoding and decoding using
 // the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
-package varint
+package encoding_varint
 
 // In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
 // Instead we'll set limits on the values we'll encode/decode

+ 1 - 1
core/encoding/xml/debug_print.odin

@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
 
 /*
 	An XML 1.0 / 1.1 parser

+ 1 - 1
core/encoding/xml/helpers.odin

@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
 
 /*
 	An XML 1.0 / 1.1 parser

+ 1 - 1
core/encoding/xml/tokenizer.odin

@@ -1,4 +1,4 @@
-package xml
+package encoding_xml
 
 /*
 	An XML 1.0 / 1.1 parser

+ 1 - 1
core/encoding/xml/xml_reader.odin

@@ -24,7 +24,7 @@ MAYBE:
 List of contributors:
 - Jeroen van Rijn: Initial implementation.
 */
-package xml
+package encoding_xml
 // An XML 1.0 / 1.1 parser
 
 import "core:bytes"

+ 49 - 19
core/fmt/fmt.odin

@@ -120,11 +120,11 @@ register_user_formatter :: proc(id: typeid, formatter: User_Formatter) -> Regist
 //
 // 	Returns: A formatted string. 
 //
+@(require_results)
 aprint :: proc(args: ..any, sep := " ", allocator := context.allocator) -> string {
 	str: strings.Builder
 	strings.builder_init(&str, allocator)
-	sbprint(&str, ..args, sep=sep)
-	return strings.to_string(str)
+	return sbprint(&str, ..args, sep=sep)
 }
 // 	Creates a formatted string with a newline character at the end
 //
@@ -136,11 +136,11 @@ aprint :: proc(args: ..any, sep := " ", allocator := context.allocator) -> strin
 //
 // 	Returns: A formatted string with a newline character at the end.
 //
+@(require_results)
 aprintln :: proc(args: ..any, sep := " ", allocator := context.allocator) -> string {
 	str: strings.Builder
 	strings.builder_init(&str, allocator)
-	sbprintln(&str, ..args, sep=sep)
-	return strings.to_string(str)
+	return sbprintln(&str, ..args, sep=sep)
 }
 // 	Creates a formatted string using a format string and arguments
 //
@@ -153,11 +153,11 @@ aprintln :: proc(args: ..any, sep := " ", allocator := context.allocator) -> str
 //
 // 	Returns: A formatted string. The returned string must be freed accordingly.
 //
+@(require_results)
 aprintf :: proc(fmt: string, args: ..any, allocator := context.allocator, newline := false) -> string {
 	str: strings.Builder
 	strings.builder_init(&str, allocator)
-	sbprintf(&str, fmt, ..args, newline=newline)
-	return strings.to_string(str)
+	return sbprintf(&str, fmt, ..args, newline=newline)
 }
 // 	Creates a formatted string using a format string and arguments, followed by a newline.
 //
@@ -169,6 +169,7 @@ aprintf :: proc(fmt: string, args: ..any, allocator := context.allocator, newlin
 //
 // 	Returns: A formatted string. The returned string must be freed accordingly.
 //
+@(require_results)
 aprintfln :: proc(fmt: string, args: ..any, allocator := context.allocator) -> string {
 	return aprintf(fmt, ..args, allocator=allocator, newline=true)
 }
@@ -182,11 +183,11 @@ aprintfln :: proc(fmt: string, args: ..any, allocator := context.allocator) -> s
 //
 // 	Returns: A formatted string.
 //
+@(require_results)
 tprint :: proc(args: ..any, sep := " ") -> string {
 	str: strings.Builder
 	strings.builder_init(&str, context.temp_allocator)
-	sbprint(&str, ..args, sep=sep)
-	return strings.to_string(str)
+	return sbprint(&str, ..args, sep=sep)
 }
 // 	Creates a formatted string with a newline character at the end
 //
@@ -198,11 +199,11 @@ tprint :: proc(args: ..any, sep := " ") -> string {
 //
 // 	Returns: A formatted string with a newline character at the end.
 //
+@(require_results)
 tprintln :: proc(args: ..any, sep := " ") -> string {
 	str: strings.Builder
 	strings.builder_init(&str, context.temp_allocator)
-	sbprintln(&str, ..args, sep=sep)
-	return strings.to_string(str)
+	return sbprintln(&str, ..args, sep=sep)
 }
 // 	Creates a formatted string using a format string and arguments
 //
@@ -215,11 +216,11 @@ tprintln :: proc(args: ..any, sep := " ") -> string {
 //
 // 	Returns: A formatted string.
 //
+@(require_results)
 tprintf :: proc(fmt: string, args: ..any, newline := false) -> string {
 	str: strings.Builder
 	strings.builder_init(&str, context.temp_allocator)
-	sbprintf(&str, fmt, ..args, newline=newline)
-	return strings.to_string(str)
+	return sbprintf(&str, fmt, ..args, newline=newline)
 }
 // 	Creates a formatted string using a format string and arguments, followed by a newline.
 //
@@ -231,6 +232,7 @@ tprintf :: proc(fmt: string, args: ..any, newline := false) -> string {
 //
 // 	Returns: A formatted string.
 //
+@(require_results)
 tprintfln :: proc(fmt: string, args: ..any) -> string {
 	return tprintf(fmt, ..args, newline=true)
 }
@@ -339,6 +341,7 @@ panicf :: proc(fmt: string, args: ..any, loc := #caller_location) -> ! {
 //
 // Returns: A formatted C string
 //
+@(require_results)
 caprintf :: proc(format: string, args: ..any, newline := false) -> cstring {
 	str: strings.Builder
 	strings.builder_init(&str)
@@ -357,6 +360,7 @@ caprintf :: proc(format: string, args: ..any, newline := false) -> cstring {
 //
 // Returns: A formatted C string
 //
+@(require_results)
 caprintfln :: proc(format: string, args: ..any) -> cstring {
 	return caprintf(format, ..args, newline=true)
 }
@@ -371,6 +375,7 @@ caprintfln :: proc(format: string, args: ..any) -> cstring {
 //
 // Returns: A formatted C string
 //
+@(require_results)
 ctprintf :: proc(format: string, args: ..any, newline := false) -> cstring {
 	str: strings.Builder
 	strings.builder_init(&str, context.temp_allocator)
@@ -389,6 +394,7 @@ ctprintf :: proc(format: string, args: ..any, newline := false) -> cstring {
 //
 // Returns: A formatted C string
 //
+@(require_results)
 ctprintfln :: proc(format: string, args: ..any) -> cstring {
 	return ctprintf(format, ..args, newline=true)
 }
@@ -1900,7 +1906,7 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
 	// fi.hash = false;
 	fi.indent += 1
 
-	if hash	{
+	if !is_soa && hash {
 		io.write_byte(fi.writer, '\n', &fi.n)
 	}
 	defer {
@@ -1934,6 +1940,9 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
 			n = uintptr((^int)(uintptr(v.data) + info.offsets[actual_field_count])^)
 		}
 
+		if hash && n > 0 {
+			io.write_byte(fi.writer, '\n', &fi.n)
+		}
 
 		for index in 0..<n {
 			if !hash && index > 0 { io.write_string(fi.writer, ", ", &fi.n) }
@@ -1942,9 +1951,23 @@ fmt_struct :: proc(fi: ^Info, v: any, the_verb: rune, info: runtime.Type_Info_St
 
 			if !hash && field_count > 0 { io.write_string(fi.writer, ", ", &fi.n) }
 
+			if hash {
+				fi.indent -= 1
+				fmt_write_indent(fi)
+				fi.indent += 1
+			}
 			io.write_string(fi.writer, base_type_name, &fi.n)
 			io.write_byte(fi.writer, '{', &fi.n)
-			defer io.write_byte(fi.writer, '}', &fi.n)
+			if hash { io.write_byte(fi.writer, '\n', &fi.n) }
+			defer {
+				if hash {
+					fi.indent -= 1
+					fmt_write_indent(fi)
+					fi.indent += 1
+				}
+				io.write_byte(fi.writer, '}', &fi.n)
+				if hash { io.write_string(fi.writer, ",\n", &fi.n) }
+			}
 			fi.record_level += 1
 			defer fi.record_level -= 1
 
@@ -2156,14 +2179,18 @@ fmt_named :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Named)
 			when ODIN_ERROR_POS_STYLE == .Default {
 				io.write_byte(fi.writer, '(', &fi.n)
 				io.write_int(fi.writer, int(a.line), 10, &fi.n)
-				io.write_byte(fi.writer, ':', &fi.n)
-				io.write_int(fi.writer, int(a.column), 10, &fi.n)
+				if a.column != 0 {
+					io.write_byte(fi.writer, ':', &fi.n)
+					io.write_int(fi.writer, int(a.column), 10, &fi.n)
+				}
 				io.write_byte(fi.writer, ')', &fi.n)
 			} else when ODIN_ERROR_POS_STYLE == .Unix {
 				io.write_byte(fi.writer, ':', &fi.n)
 				io.write_int(fi.writer, int(a.line), 10, &fi.n)
-				io.write_byte(fi.writer, ':', &fi.n)
-				io.write_int(fi.writer, int(a.column), 10, &fi.n)
+				if a.column != 0 {
+					io.write_byte(fi.writer, ':', &fi.n)
+					io.write_int(fi.writer, int(a.column), 10, &fi.n)
+				}
 				io.write_byte(fi.writer, ':', &fi.n)
 			} else {
 				#panic("Unhandled ODIN_ERROR_POS_STYLE")
@@ -2505,8 +2532,11 @@ fmt_bit_field :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Bit
 		bit_offset := info.bit_offsets[i]
 		bit_size := info.bit_sizes[i]
 
-		value := read_bits(([^]byte)(v.data), bit_offset, bit_size)
 		type := info.types[i]
+		value := read_bits(([^]byte)(v.data), bit_offset, bit_size)
+		if reflect.is_endian_big(type) {
+			value <<= u64(8*type.size) - u64(bit_size)
+		}
 
 		if !reflect.is_unsigned(runtime.type_info_core(type)) {
 			// Sign Extension

+ 55 - 0
core/fmt/fmt_js.odin

@@ -1,7 +1,9 @@
 //+build js
 package fmt
 
+import "core:bufio"
 import "core:io"
+import "core:os"
 
 foreign import "odin_env"
 
@@ -31,12 +33,63 @@ stderr := io.Writer{
 	data      = rawptr(uintptr(2)),
 }
 
+@(private="file")
+fd_to_writer :: proc(fd: os.Handle, loc := #caller_location) -> io.Writer {
+	switch fd {
+	case 1: return stdout
+	case 2: return stderr
+	case:   panic("`fmt.fprint` variant called with invalid file descriptor for JS, only 1 (stdout) and 2 (stderr) are supported", loc)
+	}
+}
+
+// fprint formats using the default print settings and writes to fd
+fprint :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true, loc := #caller_location) -> int {
+	buf: [1024]byte
+	b: bufio.Writer
+	defer bufio.writer_flush(&b)
+
+	bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
+	w := bufio.writer_to_writer(&b)
+	return wprint(w, ..args, sep=sep, flush=flush)
+}
+
+// fprintln formats using the default print settings and writes to fd
+fprintln :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true, loc := #caller_location) -> int {
+	buf: [1024]byte
+	b: bufio.Writer
+	defer bufio.writer_flush(&b)
+
+	bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
+
+	w := bufio.writer_to_writer(&b)
+	return wprintln(w, ..args, sep=sep, flush=flush)
+}
+
+// fprintf formats according to the specified format string and writes to fd
+fprintf :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true, newline := false, loc := #caller_location) -> int {
+	buf: [1024]byte
+	b: bufio.Writer
+	defer bufio.writer_flush(&b)
+
+	bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
+
+	w := bufio.writer_to_writer(&b)
+	return wprintf(w, fmt, ..args, flush=flush, newline=newline)
+}
+
+// fprintfln formats according to the specified format string and writes to fd, followed by a newline.
+fprintfln :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true, loc := #caller_location) -> int {
+	return fprintf(fd, fmt, ..args, flush=flush, newline=true, loc=loc)
+}
+
 // print formats using the default print settings and writes to stdout
 print   :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w=stdout, args=args, sep=sep, flush=flush) }
 // println formats using the default print settings and writes to stdout
 println :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stdout, args=args, sep=sep, flush=flush) }
 // printf formats according to the specified format string and writes to stdout
 printf  :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush) }
+// printfln formats according to the specified format string and writes to stdout, followed by a newline.
+printfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush, newline=true) }
 
 // eprint formats using the default print settings and writes to stderr
 eprint   :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w=stderr, args=args, sep=sep, flush=flush) }
@@ -44,3 +97,5 @@ eprint   :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(
 eprintln :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stderr, args=args, sep=sep, flush=flush) }
 // eprintf formats according to the specified format string and writes to stderr
 eprintf  :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stderr, fmt, ..args, flush=flush) }
+// eprintfln formats according to the specified format string and writes to stderr, followed by a newline.
+eprintfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stderr, fmt, ..args, flush=flush, newline=true) }
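
A minimal sketch of the new `fprint*` variants on the JS target, assuming the `os.stdout`/`os.stderr` handles added to core/os/os_js.odin further down; the program is hypothetical:

	package example

	import "core:fmt"
	import "core:os"

	main :: proc() {
		// Only handles 1 (stdout) and 2 (stderr) are accepted on JS.
		fmt.fprintln(os.stdout, "hello from wasm")
		fmt.fprintf(os.stderr, "%d warnings\n", 2)
	}
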

+ 21 - 18
core/image/netpbm/doc.odin

@@ -1,5 +1,6 @@
 /*
 Formats:
+
 	PBM (P1, P4): Portable Bit Map,       stores black and white images   (1 channel)
 	PGM (P2, P5): Portable Gray Map,      stores greyscale images         (1 channel, 1 or 2 bytes per value)
 	PPM (P3, P6): Portable Pixel Map,     stores colour images            (3 channel, 1 or 2 bytes per value)
@@ -7,27 +8,29 @@ Formats:
 	PFM (Pf, PF): Portable Float Map,     stores floating-point images    (Pf: 1 channel, PF: 3 channel)
 
 Reading:
-	All formats fill out header fields `format`, `width`, `height`, `channels`, `depth`
-	Specific formats use more fields
-		PGM, PPM, and PAM set `maxval` (maximum of 65535)
-		PAM sets `tupltype` if there is one, and can set `channels` to any value (not just 1 or 3)
-		PFM sets `scale` (float equivalent of `maxval`) and `little_endian` (endianness of stored floats)
-	Currently doesn't support reading multiple images from one binary-format file
+
+- All formats fill out header fields `format`, `width`, `height`, `channels`, `depth`.
+- Specific formats use more fields:
+	PGM, PPM, and PAM set `maxval` (maximum of 65535)
+	PAM sets `tupltype` if there is one, and can set `channels` to any value (not just 1 or 3)
+	PFM sets `scale` (float equivalent of `maxval`) and `little_endian` (endianness of stored floats)
+- Currently doesn't support reading multiple images from one binary-format file.
 
 Writing:
-	You can use your own `Netpbm_Info` struct to control how images are written
-	All formats require the header field `format` to be specified
-	Additional header fields are required for specific formats
-		PGM, PPM, and PAM require `maxval` (maximum of 65535)
-		PAM also uses `tupltype`, though it may be left as default (empty or nil string)
-		PFM requires `scale`, and optionally `little_endian`
+
+- You can use your own `Netpbm_Info` struct to control how images are written.
+- All formats require the header field `format` to be specified.
+- Additional header fields are required for specific formats:
+	PGM, PPM, and PAM require `maxval` (maximum of 65535)
+	PAM also uses `tupltype`, though it may be left as default (empty or nil string)
+	PFM requires `scale`, and optionally `little_endian`
 
 Some syntax differences from the specifications:
-	`channels` stores the number of values per pixel, what the PAM specification calls `depth`
-	`depth` instead is the number of bits for a single value (32 for PFM, 16 or 8 otherwise)
-	`scale` and `little_endian` are separated, so the `header` will always store a positive `scale`
-	`little_endian` will only be true for a negative `scale` PFM, every other format will be false
-	`little_endian` only describes the netpbm data being read/written, the image buffer will be native
-*/
 
+- `channels` stores the number of values per pixel, what the PAM specification calls `depth`
+- `depth` instead is the number of bits for a single value (32 for PFM, 16 or 8 otherwise)
+- `scale` and `little_endian` are separated, so the `header` will always store a positive `scale`
+- `little_endian` will only be true for a negative `scale` PFM, every other format will be false
+- `little_endian` only describes the netpbm data being read/written, the image buffer will be native
+*/
 package netpbm

+ 24 - 1
core/io/io.odin

@@ -29,7 +29,7 @@ Error :: enum i32 {
 	// Invalid_Write means that a write returned an impossible count
 	Invalid_Write,
 
-	// Short_Buffer means that a read required a longer buffer than was provided
+	// Short_Buffer means that a read/write required a longer buffer than was provided
 	Short_Buffer,
 
 	// No_Progress is returned by some implementations of `io.Reader` when many calls
@@ -359,6 +359,29 @@ read_at_least :: proc(r: Reader, buf: []byte, min: int) -> (n: int, err: Error)
 	return
 }
 
+// write_full writes until the entire contents of `buf` has been written or an error occurs.
+write_full :: proc(w: Writer, buf: []byte) -> (n: int, err: Error) {
+	return write_at_least(w, buf, len(buf))
+}
+
+// write_at_least writes at least `buf[:min]` to the writer and returns the amount written.
+// If an error occurs before writing everything it is returned.
+write_at_least :: proc(w: Writer, buf: []byte, min: int) -> (n: int, err: Error) {
+	if len(buf) < min {
+		return 0, .Short_Buffer
+	}
+	for n < min && err == nil {
+		nn: int
+		nn, err = write(w, buf[n:])
+		n += nn
+	}
+
+	if err == nil && n < min {
+		err = .Short_Write
+	}
+	return
+}
+
 // copy copies from src to dst till either EOF is reached on src or an error occurs
 // It returns the number of bytes copied and the first error that occurred whilst copying, if any.
 copy :: proc(dst: Writer, src: Reader) -> (written: i64, err: Error) {
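
A minimal sketch of the new `write_full`/`write_at_least` helpers, assuming `strings.to_writer` to adapt a `strings.Builder` into an `io.Writer`; the program is hypothetical:

	package example

	import "core:fmt"
	import "core:io"
	import "core:strings"

	main :: proc() {
		b: strings.Builder
		strings.builder_init(&b)
		defer strings.builder_destroy(&b)

		w := strings.to_writer(&b)
		msg := "hello"
		// Keeps writing until all of msg has been written or an error occurs.
		n, err := io.write_full(w, transmute([]byte)msg)
		fmt.println(n, err, strings.to_string(b)) // n == 5 on success; b holds "hello"
	}
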

+ 14 - 0
core/math/big/prime.odin

@@ -1247,6 +1247,20 @@ internal_random_prime :: proc(a: ^Int, size_in_bits: int, trials: int, flags :=
 			a.digit[0] |= 3
 		}
 		if .Second_MSB_On in flags {
+			/*
+				Ensure there's enough space for the bit to be set.
+			*/
+			if a.used * _DIGIT_BITS < size_in_bits - 1 {
+				new_size := (size_in_bits - 1) / _DIGIT_BITS
+
+				if (size_in_bits - 1) % _DIGIT_BITS > 0 {
+					new_size += 1
+				}
+
+				internal_grow(a, new_size) or_return
+				a.used = new_size
+			}
+
 			internal_int_bitfield_set_single(a, size_in_bits - 2) or_return
 		}
 

+ 2 - 2
core/math/linalg/glsl/linalg_glsl.odin

@@ -1724,7 +1724,7 @@ quatFromMat4 :: proc "c" (m: mat4) -> (q: quat) {
 @(require_results)
 quatMulVec3 :: proc "c" (q: quat, v: vec3) -> vec3 {
 	xyz := vec3{q.x, q.y, q.z}
-	t := cross(xyz, v)
+	t := cross(2.0 * xyz, v)
 	return v + q.w*t + cross(xyz, t)
 }
 
@@ -1832,7 +1832,7 @@ dquatFromDmat4 :: proc "c" (m: dmat4) -> (q: dquat) {
 @(require_results)
 dquatMulDvec3 :: proc "c" (q: dquat, v: dvec3) -> dvec3 {
 	xyz := dvec3{q.x, q.y, q.z}
-	t := cross(xyz, v)
+	t := cross(2.0 * xyz, v)
 	return v + q.w*t + cross(xyz, t)
 }
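
For reference, the optimised quaternion-vector rotation used here is v' = v + w*t + (xyz × t) with t = 2*(xyz × v), which expands to the standard v' = v + 2*w*(xyz × v) + 2*(xyz × (xyz × v)); computing t without the factor of 2, as the old code did, halves both the w term and the double-cross term.
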
 

+ 28 - 1
core/math/math.odin

@@ -60,6 +60,7 @@ sqrt :: proc{
 @(require_results) sin_f32be :: proc "contextless" (θ: f32be) -> f32be { return #force_inline f32be(sin_f32(f32(θ))) }
 @(require_results) sin_f64le :: proc "contextless" (θ: f64le) -> f64le { return #force_inline f64le(sin_f64(f64(θ))) }
 @(require_results) sin_f64be :: proc "contextless" (θ: f64be) -> f64be { return #force_inline f64be(sin_f64(f64(θ))) }
+// Return the sine of θ in radians.
 sin :: proc{
 	sin_f16, sin_f16le, sin_f16be,
 	sin_f32, sin_f32le, sin_f32be,
@@ -72,6 +73,7 @@ sin :: proc{
 @(require_results) cos_f32be :: proc "contextless" (θ: f32be) -> f32be { return #force_inline f32be(cos_f32(f32(θ))) }
 @(require_results) cos_f64le :: proc "contextless" (θ: f64le) -> f64le { return #force_inline f64le(cos_f64(f64(θ))) }
 @(require_results) cos_f64be :: proc "contextless" (θ: f64be) -> f64be { return #force_inline f64be(cos_f64(f64(θ))) }
+// Return the cosine of θ in radians.
 cos :: proc{
 	cos_f16, cos_f16le, cos_f16be,
 	cos_f32, cos_f32le, cos_f32be,
@@ -378,6 +380,7 @@ log10 :: proc{
 @(require_results) tan_f64   :: proc "contextless" (θ: f64)   -> f64   { return sin(θ)/cos(θ) }
 @(require_results) tan_f64le :: proc "contextless" (θ: f64le) -> f64le { return f64le(tan_f64(f64(θ))) }
 @(require_results) tan_f64be :: proc "contextless" (θ: f64be) -> f64be { return f64be(tan_f64(f64(θ))) }
+// Return the tangent of θ in radians.
 tan :: proc{
 	tan_f16, tan_f16le, tan_f16be,
 	tan_f32, tan_f32le, tan_f32be,
@@ -1752,7 +1755,28 @@ atan2_f64be :: proc "contextless" (y, x: f64be) -> f64be {
 	// TODO(bill): Better atan2_f32
 	return f64be(atan2_f64(f64(y), f64(x)))
 }
-
+/*
+ Return the arc tangent of y/x in radians. Defined on the domain [-∞, ∞] for x and y with a range of [-π, π]
+
+ Special cases:
+	atan2(y, NaN)     = NaN
+	atan2(NaN, x)     = NaN
+	atan2(+0, x>=0)   = + 0
+	atan2(-0, x>=0)   = - 0
+	atan2(+0, x<=-0)  = + π
+	atan2(-0, x<=-0)  = - π
+	atan2(y>0, 0)     = + π/2
+	atan2(y<0, 0)     = - π/2
+	atan2(+∞, +∞)     = + π/4
+	atan2(-∞, +∞)     = - π/4
+	atan2(+∞, -∞)     =   3π/4
+	atan2(-∞, -∞)     = - 3π/4
+	atan2(y, +∞)      =   0
+	atan2(y>0, -∞)    = + π
+	atan2(y<0, -∞)    = - π
+	atan2(+∞, x)      = + π/2
+	atan2(-∞, x)      = - π/2
+*/
 atan2 :: proc{
 	atan2_f64, atan2_f32, atan2_f16,
 	atan2_f64le, atan2_f64be,
@@ -1760,6 +1784,7 @@ atan2 :: proc{
 	atan2_f16le, atan2_f16be,
 }
 
+// Return the arc tangent of x, in radians. Defined on the domain of [-∞, ∞] with a range of [-π/2, π/2]
 @(require_results)
 atan :: proc "contextless" (x: $T) -> T where intrinsics.type_is_float(T) {
 	return atan2(x, 1)
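
A small sketch of the documented behaviour (hypothetical example):

	package example

	import "core:fmt"
	import "core:math"

	main :: proc() {
		y, x := 1.0, 1.0
		fmt.println(math.atan2(y, x)) // ≈ 0.785398 (π/4)
		fmt.println(math.atan(y))     // equivalent to atan2(y, 1)
	}
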
@@ -1871,6 +1896,7 @@ asin_f16le :: proc "contextless" (x: f16le) -> f16le {
 asin_f16be :: proc "contextless" (x: f16be) -> f16be {
 	return f16be(asin_f64(f64(x)))
 }
+// Return the arc sine of x, in radians. Defined on the domain of [-1, 1] with a range of [-π/2, π/2]
 asin :: proc{
 	asin_f64, asin_f32, asin_f16,
 	asin_f64le, asin_f64be,
@@ -1985,6 +2011,7 @@ acos_f16le :: proc "contextless" (x: f16le) -> f16le {
 acos_f16be :: proc "contextless" (x: f16be) -> f16be {
 	return f16be(acos_f64(f64(x)))
 }
+// Return the arc cosine of x, in radians. Defined on the domain of [-1, 1] with a range of [0, π].
 acos :: proc{
 	acos_f64, acos_f32, acos_f16,
 	acos_f64le, acos_f64be,

+ 23 - 20
core/math/rand/rand.odin

@@ -5,6 +5,7 @@ Package core:math/rand implements various random number generators
 package rand
 
 import "base:intrinsics"
+import "core:crypto"
 import "core:math"
 import "core:mem"
 
@@ -104,27 +105,30 @@ init :: proc(r: ^Rand, seed: u64) {
 }
 
 /*
-Initialises a random number generator to use the system random number generator.  
-The system random number generator is platform specific.  
-On `linux` refer to the `getrandom` syscall.  
-On `darwin` refer to `getentropy`.  
-On `windows` refer to `BCryptGenRandom`.
-
-All other platforms are not supported
+Initialises a random number generator to use the system random number generator.
+The system random number generator is platform specific, and not supported
+on all targets.
 
 Inputs:
 - r: The random number generator to use the system random number generator
 
-WARNING: Panics if the system is not either `windows`, `darwin` or `linux`
+WARNING: Panics if the system random number generator is not supported.
+Support can be determined via the `core:crypto.HAS_RAND_BYTES` constant.
 
 Example:
+	import "core:crypto"
 	import "core:math/rand"
 	import "core:fmt"
 
 	init_as_system_example :: proc() {
 		my_rand: rand.Rand
-		rand.init_as_system(&my_rand)
-		fmt.println(rand.uint64(&my_rand))
+		switch crypto.HAS_RAND_BYTES {
+		case true:
+			rand.init_as_system(&my_rand)
+			fmt.println(rand.uint64(&my_rand))
+		case false:
+			fmt.println("system random not supported!")
+		}
 	}
 
 Possible Output:
@@ -133,7 +137,7 @@ Possible Output:
 
 */
 init_as_system :: proc(r: ^Rand) {
-	if !#defined(_system_random) {
+	if !crypto.HAS_RAND_BYTES {
 		panic(#procedure + " is not supported on this platform yet")
 	}
 	r.state = 0
@@ -144,15 +148,14 @@ init_as_system :: proc(r: ^Rand) {
 @(private)
 _random_u64 :: proc(r: ^Rand) -> u64 {
 	r := r
-	if r == nil {
+	switch {
+	case r == nil:
 		r = &global_rand
+	case r.is_system:
+		value: u64
+		crypto.rand_bytes((cast([^]u8)&value)[:size_of(u64)])
+		return value
 	}
-	when #defined(_system_random) {
-		if r.is_system {
-			return _system_random()
-		}
-	}
-
 
 	old_state := r.state
 	r.state = old_state * 6364136223846793005 + (r.inc|1)
@@ -789,8 +792,8 @@ shuffle :: proc(array: $T/[]$E, r: ^Rand = nil) {
 		return
 	}
 
-	for i := i64(0); i < n; i += 1 {
-		j := int63_max(n, r)
+	for i := i64(n - 1); i > 0; i -= 1 {
+		j := int63_max(i + 1, r)
 		array[i], array[j] = array[j], array[i]
 	}
 }

+ 0 - 22
core/math/rand/system_darwin.odin

@@ -1,22 +0,0 @@
-package rand
-
-import "core:sys/darwin"
-
-@(require_results)
-_system_random :: proc() -> u64 {
-	for {
-		value: u64
-		ret := darwin.syscall_getentropy(([^]u8)(&value), size_of(value))
-		if ret < 0 {
-			switch ret {
-			case -4: // EINTR
-				continue
-			case -78: // ENOSYS
-				panic("getentropy not available in kernel")
-			case:
-				panic("getentropy failed")
-			}
-		}
-		return value
-	}
-}

+ 0 - 14
core/math/rand/system_js.odin

@@ -1,14 +0,0 @@
-package rand
-
-foreign import "odin_env"
-foreign odin_env {
-	@(link_name = "rand_bytes")
-	env_rand_bytes :: proc "contextless" (buf: []byte) ---
-}
-
-@(require_results)
-_system_random :: proc() -> u64 {
-	buf: [8]u8
-	env_rand_bytes(buf[:])
-	return transmute(u64)buf
-}

+ 0 - 29
core/math/rand/system_linux.odin

@@ -1,29 +0,0 @@
-package rand
-
-import "core:sys/linux"
-
-@(require_results)
-_system_random :: proc() -> u64 {
-	for {
-		value: u64
-		value_buf := (cast([^]u8)&value)[:size_of(u64)]
-		_, errno := linux.getrandom(value_buf, {})
-		#partial switch errno {
-		case .NONE:
-			// Do nothing
-		case .EINTR: 
-			// Call interupted by a signal handler, just retry the request.
-			continue
-		case .ENOSYS: 
-			// The kernel is apparently prehistoric (< 3.17 circa 2014)
-			// and does not support getrandom.
-			panic("getrandom not available in kernel")
-		case:
-			// All other failures are things that should NEVER happen
-			// unless the kernel interface changes (ie: the Linux
-			// developers break userland).
-			panic("getrandom failed")
-		}
-		return value
-	}
-}

+ 0 - 13
core/math/rand/system_windows.odin

@@ -1,13 +0,0 @@
-package rand
-
-import win32 "core:sys/windows"
-
-@(require_results)
-_system_random :: proc() -> u64 {
-	value: u64
-	status := win32.BCryptGenRandom(nil, ([^]u8)(&value), size_of(value), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG)
-	if status < 0 {
-		panic("BCryptGenRandom failed")
-	}
-	return value
-}

+ 1 - 1
core/mem/allocators.odin

@@ -1124,7 +1124,7 @@ buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 	case .Query_Info:
 		info := (^Allocator_Query_Info)(old_memory)
 		if info != nil && info.pointer != nil {
-			ptr := old_memory
+			ptr := info.pointer
 			if !(b.head <= ptr && ptr <= b.tail) {
 				return nil, .Invalid_Pointer
 			}

+ 32 - 0
core/mem/tracking_allocator.odin

@@ -22,6 +22,13 @@ Tracking_Allocator :: struct {
 	bad_free_array:    [dynamic]Tracking_Allocator_Bad_Free_Entry,
 	mutex:             sync.Mutex,
 	clear_on_free_all: bool,
+
+	total_memory_allocated:   i64,
+	total_allocation_count:   i64,
+	total_memory_freed:       i64,
+	total_free_count:         i64,
+	peak_memory_allocated:    i64,
+	current_memory_allocated: i64,
 }
 
 tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
@@ -44,6 +51,7 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
 	sync.mutex_lock(&t.mutex)
 	clear(&t.allocation_map)
 	clear(&t.bad_free_array)
+	t.current_memory_allocated = 0
 	sync.mutex_unlock(&t.mutex)
 }
 
@@ -59,6 +67,21 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {
 tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                 size, alignment: int,
                                 old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) {
+	track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
+		data.total_memory_allocated += i64(entry.size)
+		data.total_allocation_count += 1
+		data.current_memory_allocated += i64(entry.size)
+		if data.current_memory_allocated > data.peak_memory_allocated {
+			data.peak_memory_allocated = data.current_memory_allocated
+		}
+	}
+
+	track_free :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) {
+		data.total_memory_freed += i64(entry.size)
+		data.total_free_count += 1
+		data.current_memory_allocated -= i64(entry.size)
+	}
+
 	data := (^Tracking_Allocator)(allocator_data)
 
 	sync.mutex_guard(&data.mutex)
@@ -100,13 +123,21 @@ tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 			err = err,
 			location = loc,
 		}
+		track_alloc(data, &data.allocation_map[result_ptr])
 	case .Free:
+		if old_memory != nil && old_memory in data.allocation_map {
+			track_free(data, &data.allocation_map[old_memory])
+		}
 		delete_key(&data.allocation_map, old_memory)
 	case .Free_All:
 		if data.clear_on_free_all {
 			clear_map(&data.allocation_map)
+			data.current_memory_allocated = 0
 		}
 	case .Resize, .Resize_Non_Zeroed:
+		if old_memory != nil && old_memory in data.allocation_map {
+			track_free(data, &data.allocation_map[old_memory])
+		}
 		if old_memory != result_ptr {
 			delete_key(&data.allocation_map, old_memory)
 		}
@@ -118,6 +149,7 @@ tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
 			err = err,
 			location = loc,
 		}
+		track_alloc(data, &data.allocation_map[result_ptr])
 
 	case .Query_Features:
 		set := (^Allocator_Mode_Set)(old_memory)
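
A minimal sketch reading the new statistics fields from a `Tracking_Allocator`; the program is hypothetical:

	package example

	import "core:fmt"
	import "core:mem"

	main :: proc() {
		track: mem.Tracking_Allocator
		mem.tracking_allocator_init(&track, context.allocator)
		defer mem.tracking_allocator_destroy(&track)
		context.allocator = mem.tracking_allocator(&track)

		data := make([]byte, 4096)
		delete(data)

		fmt.println("total allocated:", track.total_memory_allocated)
		fmt.println("peak:", track.peak_memory_allocated)
		fmt.println("allocations:", track.total_allocation_count)
		fmt.println("frees:", track.total_free_count)
	}
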

+ 6 - 2
core/net/socket_linux.odin

@@ -258,8 +258,12 @@ _send_tcp :: proc(tcp_sock: TCP_Socket, buf: []byte) -> (int, Network_Error) {
 	for total_written < len(buf) {
 		limit := min(int(max(i32)), len(buf) - total_written)
 		remaining := buf[total_written:][:limit]
-		res, errno := linux.send(linux.Fd(tcp_sock), remaining, {})
-		if errno != .NONE {
+		res, errno := linux.send(linux.Fd(tcp_sock), remaining, {.NOSIGNAL})
+		if errno == .EPIPE {
+			// If the peer is disconnected when we are trying to send we will get an `EPIPE` error,
+			// so we turn that into a clearer error
+			return total_written, TCP_Send_Error.Connection_Closed
+		} else if errno != .NONE {
 			return total_written, TCP_Send_Error(errno)
 		}
 		total_written += int(res)

+ 15 - 2
core/net/url.odin

@@ -21,7 +21,7 @@ import "core:strconv"
 import "core:unicode/utf8"
 import "core:encoding/hex"
 
-split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host, path: string, queries: map[string]string) {
+split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host, path: string, queries: map[string]string, fragment: string) {
 	s := url
 
 	i := strings.index(s, "://")
@@ -30,6 +30,12 @@ split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host,
 		s = s[i+3:]
 	}
 
+	i = strings.index(s, "#")
+	if i != -1 {
+		fragment = s[i+1:]
+		s = s[:i]
+	}
+
 	i = strings.index(s, "?")
 	if i != -1 {
 		query_str := s[i+1:]
@@ -62,7 +68,7 @@ split_url :: proc(url: string, allocator := context.allocator) -> (scheme, host,
 	return
 }
 
-join_url :: proc(scheme, host, path: string, queries: map[string]string, allocator := context.allocator) -> string {
+join_url :: proc(scheme, host, path: string, queries: map[string]string, fragment: string, allocator := context.allocator) -> string {
 	b := strings.builder_make(allocator)
 	strings.builder_grow(&b, len(scheme) + 3 + len(host) + 1 + len(path))
 
@@ -95,6 +101,13 @@ join_url :: proc(scheme, host, path: string, queries: map[string]string, allocat
 		i += 1
 	}
 
+	if fragment != "" {
+		if fragment[0] != '#' {
+			strings.write_string(&b, "#")
+		}
+		strings.write_string(&b, strings.trim_space(fragment))
+	}
+
 	return strings.to_string(b)
 }
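
A minimal sketch of the fragment handling added to `split_url`/`join_url`; the program and its printed output are hypothetical:

	package example

	import "core:fmt"
	import "core:net"

	main :: proc() {
		scheme, host, path, queries, fragment := net.split_url("https://example.com/docs?x=1#intro")
		defer delete(queries)
		fmt.println(scheme, host, path, fragment) // e.g. https example.com /docs intro

		url := net.join_url(scheme, host, path, queries, fragment)
		defer delete(url)
		fmt.println(url) // e.g. https://example.com/docs?x=1#intro
	}
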
 

+ 13 - 8
core/odin/parser/parser.odin

@@ -416,24 +416,28 @@ end_of_line_pos :: proc(p: ^Parser, tok: tokenizer.Token) -> tokenizer.Pos {
 }
 
 expect_closing_brace_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
+	return expect_closing_token_of_field_list(p, .Close_Brace, "field list")
+}
+
+expect_closing_token_of_field_list :: proc(p: ^Parser, closing_kind: tokenizer.Token_Kind, msg: string) -> tokenizer.Token {
 	token := p.curr_tok
-	if allow_token(p, .Close_Brace) {
+	if allow_token(p, closing_kind) {
 		return token
 	}
 	if allow_token(p, .Semicolon) && !tokenizer.is_newline(token) {
 		str := tokenizer.token_to_string(token)
 		error(p, end_of_line_pos(p, p.prev_tok), "expected a comma, got %s", str)
 	}
-	expect_brace := expect_token(p, .Close_Brace)
+	expect_closing := expect_token_after(p, closing_kind, msg)
 
-	if expect_brace.kind != .Close_Brace {
-		for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF && !is_non_inserted_semicolon(p.curr_tok) {
+	if expect_closing.kind != closing_kind {
+		for p.curr_tok.kind != closing_kind && p.curr_tok.kind != .EOF && !is_non_inserted_semicolon(p.curr_tok) {
 			advance_token(p)
 		}
 		return p.curr_tok
 	} 
 
-	return expect_brace
+	return expect_closing
 }
 
 expect_closing_parentheses_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
@@ -1354,6 +1358,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
 
 		rs := ast.new(ast.Return_Stmt, tok.pos, end)
 		rs.results = results[:]
+		expect_semicolon(p, rs)
 		return rs
 
 	case .Break, .Continue, .Fallthrough:
@@ -2990,8 +2995,8 @@ parse_literal_value :: proc(p: ^Parser, type: ^ast.Expr) -> ^ast.Comp_Lit {
 	}
 	p.expr_level -= 1
 
-	skip_possible_newline(p)
-	close := expect_token_after(p, .Close_Brace, "compound literal")
+  	skip_possible_newline(p)
+	close := expect_closing_brace_of_field_list(p)
 
 	pos := type.pos if type != nil else open.pos
 	lit := ast.new(ast.Comp_Lit, pos, end_pos(close))
@@ -3054,7 +3059,7 @@ parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Expr {
 		allow_token(p, .Comma) or_break
 	}
 
-	close := expect_token_after(p, .Close_Paren, "argument list")
+	close := expect_closing_token_of_field_list(p, .Close_Paren, "argument list")
 	p.expr_level -= 1
 
 	ce := ast.new(ast.Call_Expr, operand.pos, end_pos(close))

+ 1 - 0
core/odin/tokenizer/tokenizer.odin

@@ -39,6 +39,7 @@ init :: proc(t: ^Tokenizer, src: string, path: string, err: Error_Handler = defa
 	t.read_offset = 0
 	t.line_offset = 0
 	t.line_count = len(src) > 0 ? 1 : 0
+	t.insert_semicolon = false
 	t.error_count = 0
 	t.path = path
 

+ 2 - 9
core/os/os_js.odin

@@ -1,9 +1,7 @@
 //+build js
 package os
 
-import "base:intrinsics"
 import "base:runtime"
-import "core:unicode/utf16"
 
 is_path_separator :: proc(c: byte) -> bool {
 	return c == '/' || c == '\\'
@@ -64,13 +62,8 @@ write_at :: proc(fd: Handle, data: []byte, offset: i64) -> (n: int, err: Errno)
 	unimplemented("core:os procedure not supported on JS target")
 }
 
-
-
-// NOTE(bill): Uses startup to initialize it
-//stdin  := get_std_handle(uint(win32.STD_INPUT_HANDLE))
-//stdout := get_std_handle(uint(win32.STD_OUTPUT_HANDLE))
-//stderr := get_std_handle(uint(win32.STD_ERROR_HANDLE))
-
+stdout: Handle = 1
+stderr: Handle = 2
 
 get_std_handle :: proc "contextless" (h: uint) -> Handle {
 	context = runtime.default_context()

+ 2 - 2
core/os/stat.odin

@@ -3,8 +3,8 @@ package os
 import "core:time"
 
 File_Info :: struct {
-	fullpath: string,
-	name:     string,
+	fullpath: string, // allocated
+	name:     string, // uses `fullpath` as underlying data
 	size:     i64,
 	mode:     File_Mode,
 	is_dir:   bool,

+ 21 - 0
core/reflect/reflect.odin

@@ -934,6 +934,27 @@ set_union_value :: proc(dst: any, value: any) -> bool {
 	panic("expected a union to reflect.set_union_variant_typeid")
 }
 
+@(require_results)
+bit_set_is_big_endian :: proc(value: any, loc := #caller_location) -> bool {
+	if value == nil { return ODIN_ENDIAN == .Big }
+	
+	ti := runtime.type_info_base(type_info_of(value.id))
+	if info, ok := ti.variant.(runtime.Type_Info_Bit_Set); ok {
+		if info.underlying == nil { return ODIN_ENDIAN == .Big }
+
+		underlying_ti := runtime.type_info_base(info.underlying)
+		if underlying_info, uok := underlying_ti.variant.(runtime.Type_Info_Integer); uok {
+			switch underlying_info.endianness {
+			case .Platform: return ODIN_ENDIAN == .Big
+			case .Little:   return false
+			case .Big:      return true
+			}
+		}
+
+		return ODIN_ENDIAN == .Big
+	}
+	panic("expected a bit_set to reflect.bit_set_is_big_endian", loc)
+}
 
 
 @(require_results)

+ 61 - 0
core/reflect/types.odin

@@ -408,7 +408,68 @@ is_relative_multi_pointer :: proc(info: ^Type_Info) -> bool {
 }
 
 
+@(require_results)
+is_endian_platform :: proc(info: ^Type_Info) -> bool {
+	if info == nil { return false }
+	info := info
+	info = type_info_core(info)
+	#partial switch v in info.variant {
+	case Type_Info_Integer:
+		return v.endianness == .Platform
+	case Type_Info_Bit_Set:
+		if v.underlying != nil {
+			return is_endian_platform(v.underlying)
+		}
+		return true
+	case Type_Info_Pointer:
+		return true
+	}
+	return false
+}
+
+@(require_results)
+is_endian_little :: proc(info: ^Type_Info) -> bool {
+	if info == nil { return false }
+	info := info
+	info = type_info_core(info)
+	#partial switch v in info.variant {
+	case Type_Info_Integer:
+		if v.endianness == .Platform {
+			return ODIN_ENDIAN == .Little
+		}
+		return v.endianness == .Little
+	case Type_Info_Bit_Set:
+		if v.underlying != nil {
+			return is_endian_little(v.underlying)
+		}
+		return ODIN_ENDIAN == .Little
+	case Type_Info_Pointer:
+		return ODIN_ENDIAN == .Little
+	}
+	return ODIN_ENDIAN == .Little
+}
 
+@(require_results)
+is_endian_big :: proc(info: ^Type_Info) -> bool {
+	if info == nil { return false }
+	info := info
+	info = type_info_core(info)
+	#partial switch v in info.variant {
+	case Type_Info_Integer:
+		if v.endianness == .Platform {
+			return ODIN_ENDIAN == .Big
+		}
+		return v.endianness == .Big
+	case Type_Info_Bit_Set:
+		if v.underlying != nil {
+			return is_endian_big(v.underlying)
+		}
+		return ODIN_ENDIAN == .Big
+	case Type_Info_Pointer:
+		return ODIN_ENDIAN == .Big
+	}
+	return ODIN_ENDIAN == .Big
+}
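
A minimal sketch of the new endianness queries (hypothetical example):

	package example

	import "core:fmt"
	import "core:reflect"

	main :: proc() {
		fmt.println(reflect.is_endian_big(type_info_of(u32be)))    // true
		fmt.println(reflect.is_endian_little(type_info_of(u32le))) // true
		fmt.println(reflect.is_endian_platform(type_info_of(int))) // true
	}
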
 
 
 

+ 2 - 0
core/sync/chan/chan.odin

@@ -75,6 +75,7 @@ create_raw_unbuffered :: proc(#any_int msg_size, msg_alignment: int, allocator:
 
 	ptr := mem.alloc(size, align, allocator) or_return
 	c = (^Raw_Chan)(ptr)
+	c.allocator = allocator
 	c.allocation_size = size
 	c.unbuffered_data = ([^]byte)(ptr)[offset:]
 	c.msg_size = u16(msg_size)
@@ -99,6 +100,7 @@ create_raw_buffered :: proc(#any_int msg_size, msg_alignment: int, #any_int cap:
 
 	ptr := mem.alloc(size, align, allocator) or_return
 	c = (^Raw_Chan)(ptr)
+	c.allocator = allocator
 	c.allocation_size = size
 
 	bptr := ([^]byte)(ptr)

+ 46 - 137
core/sync/futex_netbsd.odin

@@ -1,165 +1,74 @@
 //+private
 package sync
 
-import "core:c"
+import "base:intrinsics"
 import "core:time"
+import "core:c"
 import "core:sys/unix"
 
-@(private="file")
-Wait_Node :: struct {
-	thread:     unix.pthread_t,
-	futex:      ^Futex,
-	prev, next: ^Wait_Node,
-}
-@(private="file")
-atomic_flag :: distinct bool
-@(private="file")
-Wait_Queue :: struct {
-	lock: atomic_flag,
-	list: Wait_Node,
-}
-@(private="file")
-waitq_lock :: proc "contextless" (waitq: ^Wait_Queue) {
-	for cast(bool)atomic_exchange_explicit(&waitq.lock, atomic_flag(true), .Acquire) {
-		cpu_relax() // spin...
-	}
-}
-@(private="file")
-waitq_unlock :: proc "contextless" (waitq: ^Wait_Queue) {
-	atomic_store_explicit(&waitq.lock, atomic_flag(false), .Release)
-}
+foreign import libc "system:c"
 
-// FIXME: This approach may scale badly in the future,
-// possible solution - hash map (leads to deadlocks now).
-@(private="file")
-g_waitq: Wait_Queue
+FUTEX_PRIVATE_FLAG :: 128
 
-@(init, private="file")
-g_waitq_init :: proc() {
-	g_waitq = {
-		list = {
-			prev = &g_waitq.list,
-			next = &g_waitq.list,
-		},
-	}
-}
+FUTEX_WAIT_PRIVATE :: 0 | FUTEX_PRIVATE_FLAG
+FUTEX_WAKE_PRIVATE :: 1 | FUTEX_PRIVATE_FLAG
 
-@(private="file")
-get_waitq :: #force_inline proc "contextless" (f: ^Futex) -> ^Wait_Queue {
-	_ = f
-	return &g_waitq
-}
+EINTR     :: 4		/* Interrupted system call */
+EAGAIN    :: 35		/* Resource temporarily unavailable */
+ETIMEDOUT :: 60		/* Operation timed out */
 
-_futex_wait :: proc "contextless" (f: ^Futex, expect: u32) -> (ok: bool) {
-	waitq := get_waitq(f)
-	waitq_lock(waitq)
-	defer waitq_unlock(waitq)
+Time_Spec :: struct {
+	time_sec:  uint,
+	time_nsec: uint,
+}
 
-	head   := &waitq.list
-	waiter := Wait_Node{
-		thread = unix.pthread_self(),
-		futex  = f,
-		prev   = head,
-		next   = head.next,
+get_last_error :: proc "contextless" () -> int {
+	foreign libc {
+		__errno :: proc() -> ^c.int ---
 	}
+	return int(__errno()^)
+}
 
-	waiter.prev.next = &waiter
-	waiter.next.prev = &waiter
-
-	old_mask, mask: unix.sigset_t
-	unix.sigemptyset(&mask)
-	unix.sigaddset(&mask, unix.SIGCONT)
-	unix.pthread_sigmask(unix.SIG_BLOCK, &mask, &old_mask)
-
-	if u32(atomic_load_explicit(f, .Acquire)) == expect {
-		waitq_unlock(waitq)
-		defer waitq_lock(waitq)
-		
-		sig: c.int
-		unix.sigwait(&mask, &sig)
-		errno := unix.errno() 
-		ok = errno == unix.ERROR_NONE
+_futex_wait :: proc "contextless" (futex: ^Futex, expected: u32) -> bool {
+	if cast(int) intrinsics.syscall(unix.SYS___futex, uintptr(futex), FUTEX_WAIT_PRIVATE, uintptr(expected), 0, 0, 0) == -1 {
+		switch get_last_error() {
+		case EINTR, EAGAIN:
+			return true
+		case:
+			_panic("futex_wait failure")
+		}	
 	}
-
-	waiter.prev.next = waiter.next
-	waiter.next.prev = waiter.prev
-
- 	unix.pthread_sigmask(unix.SIG_SETMASK, &old_mask, nil)
-
- 	// FIXME: Add error handling!
- 	return
+	return true
 }
 
-_futex_wait_with_timeout :: proc "contextless" (f: ^Futex, expect: u32, duration: time.Duration) -> (ok: bool) {
+_futex_wait_with_timeout :: proc "contextless" (futex: ^Futex, expected: u32, duration: time.Duration) -> bool {
 	if duration <= 0 {
 		return false
 	}
-	waitq := get_waitq(f)
-	waitq_lock(waitq)
-	defer waitq_unlock(waitq)
-
-	head   := &waitq.list
-	waiter := Wait_Node{
-		thread = unix.pthread_self(),
-		futex  = f,
-		prev   = head,
-		next   = head.next,
-	}
-
-	waiter.prev.next = &waiter
-	waiter.next.prev = &waiter
-
-	old_mask, mask: unix.sigset_t
-	unix.sigemptyset(&mask)
-	unix.sigaddset(&mask, unix.SIGCONT)
-	unix.pthread_sigmask(unix.SIG_BLOCK, &mask, &old_mask)
-
-	if u32(atomic_load_explicit(f, .Acquire)) == expect {
-		waitq_unlock(waitq)
-		defer waitq_lock(waitq)
-		
-		info: unix.siginfo_t
-		ts := unix.timespec{
-			tv_sec  = i64(duration / 1e9),
-			tv_nsec = i64(duration % 1e9),
+	if cast(int) intrinsics.syscall(unix.SYS___futex, uintptr(futex), FUTEX_WAIT_PRIVATE, uintptr(expected), cast(uintptr) &Time_Spec{
+		time_sec  = cast(uint)(duration / 1e9),
+		time_nsec = cast(uint)(duration % 1e9),
+	}, 0, 0) == -1 {
+		switch get_last_error() {
+		case EINTR, EAGAIN:
+			return true
+		case ETIMEDOUT:
+			return false
+		case:
+			_panic("futex_wait_with_timeout failure")
 		}
-		unix.sigtimedwait(&mask, &info, &ts)
-		errno := unix.errno() 
-		ok = errno == unix.EAGAIN || errno == unix.ERROR_NONE
 	}
-
-	waiter.prev.next = waiter.next
-	waiter.next.prev = waiter.prev
-
- 	unix.pthread_sigmask(unix.SIG_SETMASK, &old_mask, nil)
-
- 	// FIXME: Add error handling!
- 	return 
+	return true
 }
 
-_futex_signal :: proc "contextless" (f: ^Futex) {
-	waitq := get_waitq(f)
-	waitq_lock(waitq)
-	defer waitq_unlock(waitq)
-
-	head := &waitq.list
-	for waiter := head.next; waiter != head; waiter = waiter.next {
-		if waiter.futex == f {
-			unix.pthread_kill(waiter.thread, unix.SIGCONT)
-			break
-		}
+_futex_signal :: proc "contextless" (futex: ^Futex) {
+	if cast(int) intrinsics.syscall(unix.SYS___futex, uintptr(futex), FUTEX_WAKE_PRIVATE, 1, 0, 0, 0) == -1 {
+		_panic("futex_wake_single failure")
 	}
 }
 
-_futex_broadcast :: proc "contextless" (f: ^Futex) {
-	waitq := get_waitq(f)
-	waitq_lock(waitq)
-	defer waitq_unlock(waitq)
-
-	head := &waitq.list
-	for waiter := head.next; waiter != head; waiter = waiter.next {
-		if waiter.futex == f {
-			unix.pthread_kill(waiter.thread, unix.SIGCONT)
-		}
+_futex_broadcast :: proc "contextless" (futex: ^Futex)  {
+	if cast(int) intrinsics.syscall(unix.SYS___futex, uintptr(futex), FUTEX_WAKE_PRIVATE, uintptr(max(i32)), 0, 0, 0) == -1 {
+		_panic("_futex_wake_all failure")
 	}
 }

+ 34 - 0
core/sys/darwin/CoreFoundation/CFBase.odin

@@ -0,0 +1,34 @@
+package CoreFoundation
+
+foreign import CoreFoundation "system:CoreFoundation.framework"
+
+TypeID      :: distinct uint
+OptionFlags :: distinct uint
+HashCode    :: distinct uint
+Index       :: distinct int
+TypeRef     :: distinct rawptr
+
+Range :: struct {
+	location: Index,
+	length:   Index,
+}
+
+foreign CoreFoundation {
+	// Releases a Core Foundation object.
+	CFRelease :: proc(cf: TypeRef) ---
+}
+
+// Releases a Core Foundation object.
+Release :: proc {
+	ReleaseObject,
+	ReleaseString,
+}
+
+ReleaseObject :: #force_inline proc(cf: TypeRef) {
+	CFRelease(cf)
+}
+
+// Releases a Core Foundation string.
+ReleaseString :: #force_inline proc(theString: String) {
+	CFRelease(TypeRef(theString))
+}

+ 203 - 0
core/sys/darwin/CoreFoundation/CFString.odin

@@ -0,0 +1,203 @@
+package CoreFoundation
+
+import "base:runtime"
+
+foreign import CoreFoundation "system:CoreFoundation.framework"
+
+String :: distinct TypeRef // same as CFStringRef
+
+StringEncoding :: distinct u32
+
+StringBuiltInEncodings :: enum StringEncoding {
+	MacRoman = 0,
+	WindowsLatin1 = 0x0500,
+	ISOLatin1 = 0x0201,
+	NextStepLatin = 0x0B01,
+	ASCII = 0x0600,
+	Unicode = 0x0100,
+	UTF8 = 0x08000100,
+	NonLossyASCII = 0x0BFF,
+
+	UTF16 = 0x0100,
+	UTF16BE = 0x10000100,
+	UTF16LE = 0x14000100,
+
+	UTF32 = 0x0c000100,
+	UTF32BE = 0x18000100,
+	UTF32LE = 0x1c000100,
+}
+
+StringEncodings :: enum Index {
+    MacJapanese = 1,
+    MacChineseTrad = 2,
+    MacKorean = 3,
+    MacArabic = 4,
+    MacHebrew = 5,
+    MacGreek = 6,
+    MacCyrillic = 7,
+    MacDevanagari = 9,
+    MacGurmukhi = 10,
+    MacGujarati = 11,
+    MacOriya = 12,
+    MacBengali = 13,
+    MacTamil = 14,
+    MacTelugu = 15,
+    MacKannada = 16,
+    MacMalayalam = 17,
+    MacSinhalese = 18,
+    MacBurmese = 19,
+    MacKhmer = 20,
+    MacThai = 21,
+    MacLaotian = 22,
+    MacGeorgian = 23,
+    MacArmenian = 24,
+    MacChineseSimp = 25,
+    MacTibetan = 26,
+    MacMongolian = 27,
+    MacEthiopic = 28,
+    MacCentralEurRoman = 29,
+    MacVietnamese = 30,
+    MacExtArabic = 31,
+    MacSymbol = 33,
+    MacDingbats = 34,
+    MacTurkish = 35,
+    MacCroatian = 36,
+    MacIcelandic = 37,
+    MacRomanian = 38,
+    MacCeltic = 39,
+    MacGaelic = 40,
+    MacFarsi = 0x8C,
+    MacUkrainian = 0x98,
+    MacInuit = 0xEC,
+    MacVT100 = 0xFC,
+    MacHFS = 0xFF,
+    ISOLatin2 = 0x0202,
+    ISOLatin3 = 0x0203,
+    ISOLatin4 = 0x0204,
+    ISOLatinCyrillic = 0x0205,
+    ISOLatinArabic = 0x0206,
+    ISOLatinGreek = 0x0207,
+    ISOLatinHebrew = 0x0208,
+    ISOLatin5 = 0x0209,
+    ISOLatin6 = 0x020A,
+    ISOLatinThai = 0x020B,
+    ISOLatin7 = 0x020D,
+    ISOLatin8 = 0x020E,
+    ISOLatin9 = 0x020F,
+    ISOLatin10 = 0x0210,
+    DOSLatinUS = 0x0400,
+    DOSGreek = 0x0405,
+    DOSBalticRim = 0x0406,
+    DOSLatin1 = 0x0410,
+    DOSGreek1 = 0x0411,
+    DOSLatin2 = 0x0412,
+    DOSCyrillic = 0x0413,
+    DOSTurkish = 0x0414,
+    DOSPortuguese = 0x0415,
+    DOSIcelandic = 0x0416,
+    DOSHebrew = 0x0417,
+    DOSCanadianFrench = 0x0418,
+    DOSArabic = 0x0419,
+    DOSNordic = 0x041A,
+    DOSRussian = 0x041B,
+    DOSGreek2 = 0x041C,
+    DOSThai = 0x041D,
+    DOSJapanese = 0x0420,
+    DOSChineseSimplif = 0x0421,
+    DOSKorean = 0x0422,
+    DOSChineseTrad = 0x0423,
+    WindowsLatin2 = 0x0501,
+    WindowsCyrillic = 0x0502,
+    WindowsGreek = 0x0503,
+    WindowsLatin5 = 0x0504,
+    WindowsHebrew = 0x0505,
+    WindowsArabic = 0x0506,
+    WindowsBalticRim = 0x0507,
+    WindowsVietnamese = 0x0508,
+    WindowsKoreanJohab = 0x0510,
+    ANSEL = 0x0601,
+    JIS_X0201_76 = 0x0620,
+    JIS_X0208_83 = 0x0621,
+    JIS_X0208_90 = 0x0622,
+    JIS_X0212_90 = 0x0623,
+    JIS_C6226_78 = 0x0624,
+    ShiftJIS_X0213 = 0x0628,
+    ShiftJIS_X0213_MenKuTen = 0x0629,
+    GB_2312_80 = 0x0630,
+    GBK_95 = 0x0631,
+    GB_18030_2000 = 0x0632,
+    KSC_5601_87 = 0x0640,
+    KSC_5601_92_Johab = 0x0641,
+    CNS_11643_92_P1 = 0x0651,
+    CNS_11643_92_P2 = 0x0652,
+    CNS_11643_92_P3 = 0x0653,
+    ISO_2022_JP = 0x0820,
+    ISO_2022_JP_2 = 0x0821,
+    ISO_2022_JP_1 = 0x0822,
+    ISO_2022_JP_3 = 0x0823,
+    ISO_2022_CN = 0x0830,
+    ISO_2022_CN_EXT = 0x0831,
+    ISO_2022_KR = 0x0840,
+    EUC_JP = 0x0920,
+    EUC_CN = 0x0930,
+    EUC_TW = 0x0931,
+    EUC_KR = 0x0940,
+    ShiftJIS = 0x0A01,
+    KOI8_R = 0x0A02,
+    Big5 = 0x0A03,
+    MacRomanLatin1 = 0x0A04,
+    HZ_GB_2312 = 0x0A05,
+    Big5_HKSCS_1999 = 0x0A06,
+    VISCII = 0x0A07,
+    KOI8_U = 0x0A08,
+    Big5_E = 0x0A09,
+    NextStepJapanese = 0x0B02,
+    EBCDIC_US = 0x0C01,
+    EBCDIC_CP037 = 0x0C02,
+    UTF7 = 0x04000100,
+    UTF7_IMAP = 0x0A10,
+    ShiftJIS_X0213_00 = 0x0628, // Deprecated. Use `ShiftJIS_X0213` instead.
+}
+
+@(link_prefix = "CF", default_calling_convention = "c")
+foreign CoreFoundation {
+	// Copies the character contents of a string to a local C string buffer after converting the characters to a given encoding.
+	StringGetCString :: proc(theString: String, buffer: [^]byte, bufferSize: Index, encoding: StringEncoding) -> b8 ---
+
+	// Returns the number (in terms of UTF-16 code pairs) of Unicode characters in a string.
+	StringGetLength :: proc(theString: String) -> Index ---
+
+	// Returns the maximum number of bytes a string of a specified length (in Unicode characters) will take up if encoded in a specified encoding.
+	StringGetMaximumSizeForEncoding :: proc(length: Index, encoding: StringEncoding) -> Index ---
+
+	// Fetches a range of the characters from a string into a byte buffer after converting the characters to a specified encoding.
+	StringGetBytes :: proc(thestring: String, range: Range, encoding: StringEncoding, lossByte: u8, isExternalRepresentation: b8, buffer: [^]byte, maxBufLen: Index, usedBufLen: ^Index) -> Index ---
+
+	StringIsEncodingAvailable :: proc(encoding: StringEncoding) -> bool ---
+
+	@(link_name = "__CFStringMakeConstantString")
+	StringMakeConstantString :: proc "c" (#const c: cstring) -> String ---
+}
+
+STR :: StringMakeConstantString
+
+StringCopyToOdinString :: proc(
+	theString: String,
+	allocator := context.allocator,
+) -> (
+	str: string,
+	ok: bool,
+) #optional_ok {
+	length := StringGetLength(theString)
+	max := StringGetMaximumSizeForEncoding(length, StringEncoding(StringBuiltInEncodings.UTF8))
+
+	buf, err := make([]byte, max, allocator)
+	if err != nil do return
+
+	raw_str := runtime.Raw_String {
+		data = raw_data(buf),
+	}
+	StringGetBytes(theString, {0, length}, StringEncoding(StringBuiltInEncodings.UTF8), 0, false, raw_data(buf), max, (^Index)(&raw_str.len))
+
+	return transmute(string)raw_str, true
+}

+ 1 - 1
core/sys/darwin/Foundation/NSApplication.odin

@@ -132,7 +132,7 @@ Application_nextEventMatchingMask :: proc "c" (self: ^Application, mask: EventMa
 
 @(objc_type=Application, objc_name="sendEvent")
 Application_sendEvent :: proc "c" (self: ^Application, event: ^Event) {
-	msgSend(Event, self, "sendEvent:", event)
+	msgSend(nil, self, "sendEvent:", event)
 }
 @(objc_type=Application, objc_name="updateWindows")
 Application_updateWindows :: proc "c" (self: ^Application) {

+ 6 - 9
core/sys/darwin/Foundation/NSString.odin

@@ -23,12 +23,9 @@ StringEncoding :: enum UInteger {
 	WindowsCP1250     = 15,
 	ISO2022JP         = 21,
 	MacOSRoman        = 30,
-
 	UTF16             = Unicode,
-
 	UTF16BigEndian    = 0x90000100,
 	UTF16LittleEndian = 0x94000100,
-
 	UTF32             = 0x8c000100,
 	UTF32BigEndian    = 0x98000100,
 	UTF32LittleEndian = 0x9c000100,
@@ -49,12 +46,9 @@ StringCompareOption :: enum UInteger {
 
 unichar :: distinct u16
 
-@(link_prefix="NS", default_calling_convention="c")
-foreign Foundation {
-	StringFromClass :: proc(cls: Class) -> ^String ---
-}
-
 AT :: MakeConstantString
+
+// CFString is 'toll-free bridged' with its Cocoa Foundation counterpart, NSString.
 MakeConstantString :: proc "c" (#const c: cstring) -> ^String {
 	foreign Foundation {
 		__CFStringMakeConstantString :: proc "c" (c: cstring) -> ^String ---
@@ -62,6 +56,10 @@ MakeConstantString :: proc "c" (#const c: cstring) -> ^String {
 	return __CFStringMakeConstantString(c)
 }
 
+@(link_prefix="NS", default_calling_convention="c")
+foreign Foundation {
+	StringFromClass :: proc(cls: Class) -> ^String ---
+}
 
 @(objc_type=String, objc_name="alloc", objc_is_class_method=true)
 String_alloc :: proc "c" () -> ^String {
@@ -73,7 +71,6 @@ String_init :: proc "c" (self: ^String) -> ^String {
 	return msgSend(^String, self, "init")
 }
 
-
 @(objc_type=String, objc_name="initWithString")
 String_initWithString :: proc "c" (self: ^String, other: ^String) -> ^String {
 	return msgSend(^String, self, "initWithString:", other)

+ 386 - 0
core/sys/darwin/Security/SecBase.odin

@@ -0,0 +1,386 @@
+package Security
+
+OSStatus :: distinct i32
+
+errSec :: enum OSStatus {
+	Success = 0, // No error.
+	Unimplemented = -4, // Function or operation not implemented.
+	DiskFull = -34, // The disk is full.
+	IO = -36, // I/O error.
+	OpWr = -49, // File already open with write permission.
+	Param = -50, // One or more parameters passed to a function were not valid.
+	WrPerm = -61, // Write permissions error.
+	Allocate = -108, // Failed to allocate memory.
+	UserCanceled = -128, // User canceled the operation.
+	BadReq = -909, // Bad parameter or invalid state for operation.
+	InternalComponent = -2070,
+	CoreFoundationUnknown = -4960,
+	MissingEntitlement, // A required entitlement isn't present.
+	RestrictedAPI, // Client is restricted and is not permitted to perform this operation.
+	NotAvailable = -25291, // No keychain is available. You may need to restart your computer.
+	ReadOnly = -25292, // This keychain cannot be modified.
+	AuthFailed = -25293, // The user name or passphrase you entered is not correct.
+	NoSuchKeychain = -25294, // The specified keychain could not be found.
+	InvalidKeychain = -25295, // The specified keychain is not a valid keychain file.
+	DuplicateKeychain = -25296, // A keychain with the same name already exists.
+	DuplicateCallback = -25297, // The specified callback function is already installed.
+	InvalidCallback = -25298, // The specified callback function is not valid.
+	DuplicateItem = -25299, // The specified item already exists in the keychain.
+	ItemNotFound = -25300, // The specified item could not be found in the keychain.
+	BufferTooSmall = -25301, // There is not enough memory available to use the specified item.
+	DataTooLarge = -25302, // This item contains information which is too large or in a format that cannot be displayed.
+	NoSuchAttr = -25303, // The specified attribute does not exist.
+	InvalidItemRef = -25304, // The specified item is no longer valid. It may have been deleted from the keychain.
+	InvalidSearchRef = -25305, // Unable to search the current keychain.
+	NoSuchClass = -25306, // The specified item does not appear to be a valid keychain item.
+	NoDefaultKeychain = -25307, // A default keychain could not be found.
+	InteractionNotAllowed = -25308, // User interaction is not allowed.
+	ReadOnlyAttr = -25309, // The specified attribute could not be modified.
+	WrongSecVersion = -25310, // This keychain was created by a different version of the system software and cannot be opened.
+	KeySizeNotAllowed = -25311, // This item specifies a key size which is too large or too small.
+	NoStorageModule = -25312, // A required component (data storage module) could not be loaded. You may need to restart your computer.
+	NoCertificateModule = -25313, // A required component (certificate module) could not be loaded. You may need to restart your computer.
+	NoPolicyModule = -25314, // A required component (policy module) could not be loaded. You may need to restart your computer.
+	InteractionRequired = -25315, // User interaction is required, but is currently not allowed.
+	DataNotAvailable = -25316, // The contents of this item cannot be retrieved.
+	DataNotModifiable = -25317, // The contents of this item cannot be modified.
+	CreateChainFailed = -25318, // One or more certificates required to validate this certificate cannot be found.
+	InvalidPrefsDomain = -25319, // The specified preferences domain is not valid.
+	InDarkWake = -25320, // In dark wake, no UI possible
+	ACLNotSimple = -25240, // The specified access control list is not in standard (simple) form.
+	PolicyNotFound = -25241, // The specified policy cannot be found.
+	InvalidTrustSetting = -25242, // The specified trust setting is invalid.
+	NoAccessForItem = -25243, // The specified item has no access control.
+	InvalidOwnerEdit = -25244, // Invalid attempt to change the owner of this item.
+	TrustNotAvailable = -25245, // No trust results are available.
+	UnsupportedFormat = -25256, // Import/Export format unsupported.
+	UnknownFormat = -25257, // Unknown format in import.
+	KeyIsSensitive = -25258, // Key material must be wrapped for export.
+	MultiplePrivKeys = -25259, // An attempt was made to import multiple private keys.
+	PassphraseRequired = -25260, // Passphrase is required for import/export.
+	InvalidPasswordRef = -25261, // The password reference was invalid.
+	InvalidTrustSettings = -25262, // The Trust Settings Record was corrupted.
+	NoTrustSettings = -25263, // No Trust Settings were found.
+	Pkcs12VerifyFailure = -25264, // MAC verification failed during PKCS12 import (wrong password?)
+	NotSigner = -26267, // A certificate was not signed by its proposed parent.
+	Decode = -26275, // Unable to decode the provided data.
+	ServiceNotAvailable = -67585, // The required service is not available.
+	InsufficientClientID = -67586, // The client ID is not correct.
+	DeviceReset = -67587, // A device reset has occurred.
+	DeviceFailed = -67588, // A device failure has occurred.
+	AppleAddAppACLSubject = -67589, // Adding an application ACL subject failed.
+	ApplePublicKeyIncomplete = -67590, // The public key is incomplete.
+	AppleSignatureMismatch = -67591, // A signature mismatch has occurred.
+	AppleInvalidKeyStartDate = -67592, // The specified key has an invalid start date.
+	AppleInvalidKeyEndDate = -67593, // The specified key has an invalid end date.
+	ConversionError = -67594, // A conversion error has occurred.
+	AppleSSLv2Rollback = -67595, // A SSLv2 rollback error has occurred.
+	QuotaExceeded = -67596, // The quota was exceeded.
+	FileTooBig = -67597, // The file is too big.
+	InvalidDatabaseBlob = -67598, // The specified database has an invalid blob.
+	InvalidKeyBlob = -67599, // The specified database has an invalid key blob.
+	IncompatibleDatabaseBlob = -67600, // The specified database has an incompatible blob.
+	IncompatibleKeyBlob = -67601, // The specified database has an incompatible key blob.
+	HostNameMismatch = -67602, // A host name mismatch has occurred.
+	UnknownCriticalExtensionFlag = -67603, // There is an unknown critical extension flag.
+	NoBasicConstraints = -67604, // No basic constraints were found.
+	NoBasicConstraintsCA = -67605, // No basic CA constraints were found.
+	InvalidAuthorityKeyID = -67606, // The authority key ID is not valid.
+	InvalidSubjectKeyID = -67607, // The subject key ID is not valid.
+	InvalidKeyUsageForPolicy = -67608, // The key usage is not valid for the specified policy.
+	InvalidExtendedKeyUsage = -67609, // The extended key usage is not valid.
+	InvalidIDLinkage = -67610, // The ID linkage is not valid.
+	PathLengthConstraintExceeded = -67611, // The path length constraint was exceeded.
+	InvalidRoot = -67612, // The root or anchor certificate is not valid.
+	CRLExpired = -67613, // The CRL has expired.
+	CRLNotValidYet = -67614, // The CRL is not yet valid.
+	CRLNotFound = -67615, // The CRL was not found.
+	CRLServerDown = -67616, // The CRL server is down.
+	CRLBadURI = -67617, // The CRL has a bad Uniform Resource Identifier.
+	UnknownCertExtension = -67618, // An unknown certificate extension was encountered.
+	UnknownCRLExtension = -67619, // An unknown CRL extension was encountered.
+	CRLNotTrusted = -67620, // The CRL is not trusted.
+	CRLPolicyFailed = -67621, // The CRL policy failed.
+	IDPFailure = -67622, // The issuing distribution point was not valid.
+	SMIMEEmailAddressesNotFound = -67623, // An email address mismatch was encountered.
+	SMIMEBadExtendedKeyUsage = -67624, // The appropriate extended key usage for SMIME was not found.
+	SMIMEBadKeyUsage = -67625, // The key usage is not compatible with SMIME.
+	SMIMEKeyUsageNotCritical = -67626, // The key usage extension is not marked as critical.
+	SMIMENoEmailAddress = -67627, // No email address was found in the certificate.
+	SMIMESubjAltNameNotCritical = -67628, // The subject alternative name extension is not marked as critical.
+	SSLBadExtendedKeyUsage = -67629, // The appropriate extended key usage for SSL was not found.
+	OCSPBadResponse = -67630, // The OCSP response was incorrect or could not be parsed.
+	OCSPBadRequest = -67631, // The OCSP request was incorrect or could not be parsed.
+	OCSPUnavailable = -67632, // OCSP service is unavailable.
+	OCSPStatusUnrecognized = -67633, // The OCSP server did not recognize this certificate.
+	EndOfData = -67634, // An end-of-data was detected.
+	IncompleteCertRevocationCheck = -67635, // An incomplete certificate revocation check occurred.
+	NetworkFailure = -67636, // A network failure occurred.
+	OCSPNotTrustedToAnchor = -67637, // The OCSP response was not trusted to a root or anchor certificate.
+	RecordModified = -67638, // The record was modified.
+	OCSPSignatureError = -67639, // The OCSP response had an invalid signature.
+	OCSPNoSigner = -67640, // The OCSP response had no signer.
+	OCSPResponderMalformedReq = -67641, // The OCSP responder was given a malformed request.
+	OCSPResponderInternalError = -67642, // The OCSP responder encountered an internal error.
+	OCSPResponderTryLater = -67643, // The OCSP responder is busy, try again later.
+	OCSPResponderSignatureRequired = -67644, // The OCSP responder requires a signature.
+	OCSPResponderUnauthorized = -67645, // The OCSP responder rejected this request as unauthorized.
+	OCSPResponseNonceMismatch = -67646, // The OCSP response nonce did not match the request.
+	CodeSigningBadCertChainLength = -67647, // Code signing encountered an incorrect certificate chain length.
+	CodeSigningNoBasicConstraints = -67648, // Code signing found no basic constraints.
+	CodeSigningBadPathLengthConstraint = -67649, // Code signing encountered an incorrect path length constraint.
+	CodeSigningNoExtendedKeyUsage = -67650, // Code signing found no extended key usage.
+	CodeSigningDevelopment = -67651, // Code signing indicated use of a development-only certificate.
+	ResourceSignBadCertChainLength = -67652, // Resource signing has encountered an incorrect certificate chain length.
+	ResourceSignBadExtKeyUsage = -67653, // Resource signing has encountered an error in the extended key usage.
+	TrustSettingDeny = -67654, // The trust setting for this policy was set to Deny.
+	InvalidSubjectName = -67655, // An invalid certificate subject name was encountered.
+	UnknownQualifiedCertStatement = -67656, // An unknown qualified certificate statement was encountered.
+	MobileMeRequestQueued = -67657,
+	MobileMeRequestRedirected = -67658,
+	MobileMeServerError = -67659,
+	MobileMeServerNotAvailable = -67660,
+	MobileMeServerAlreadyExists = -67661,
+	MobileMeServerServiceErr = -67662,
+	MobileMeRequestAlreadyPending = -67663,
+	MobileMeNoRequestPending = -67664,
+	MobileMeCSRVerifyFailure = -67665,
+	MobileMeFailedConsistencyCheck = -67666,
+	NotInitialized = -67667, // A function was called without initializing CSSM.
+	InvalidHandleUsage = -67668, // The CSSM handle does not match with the service type.
+	PVCReferentNotFound = -67669, // A reference to the calling module was not found in the list of authorized callers.
+	FunctionIntegrityFail = -67670, // A function address was not within the verified module.
+	InternalError = -67671, // An internal error has occurred.
+	MemoryError = -67672, // A memory error has occurred.
+	InvalidData = -67673, // Invalid data was encountered.
+	MDSError = -67674, // A Module Directory Service error has occurred.
+	InvalidPointer = -67675, // An invalid pointer was encountered.
+	SelfCheckFailed = -67676, // Self-check has failed.
+	FunctionFailed = -67677, // A function has failed.
+	ModuleManifestVerifyFailed = -67678, // A module manifest verification failure has occurred.
+	InvalidGUID = -67679, // An invalid GUID was encountered.
+	InvalidHandle = -67680, // An invalid handle was encountered.
+	InvalidDBList = -67681, // An invalid DB list was encountered.
+	InvalidPassthroughID = -67682, // An invalid passthrough ID was encountered.
+	InvalidNetworkAddress = -67683, // An invalid network address was encountered.
+	CRLAlreadySigned = -67684, // The certificate revocation list is already signed.
+	InvalidNumberOfFields = -67685, // An invalid number of fields were encountered.
+	VerificationFailure = -67686, // A verification failure occurred.
+	UnknownTag = -67687, // An unknown tag was encountered.
+	InvalidSignature = -67688, // An invalid signature was encountered.
+	InvalidName = -67689, // An invalid name was encountered.
+	InvalidCertificateRef = -67690, // An invalid certificate reference was encountered.
+	InvalidCertificateGroup = -67691, // An invalid certificate group was encountered.
+	TagNotFound = -67692, // The specified tag was not found.
+	InvalidQuery = -67693, // The specified query was not valid.
+	InvalidValue = -67694, // An invalid value was detected.
+	CallbackFailed = -67695, // A callback has failed.
+	ACLDeleteFailed = -67696, // An ACL delete operation has failed.
+	ACLReplaceFailed = -67697, // An ACL replace operation has failed.
+	ACLAddFailed = -67698, // An ACL add operation has failed.
+	ACLChangeFailed = -67699, // An ACL change operation has failed.
+	InvalidAccessCredentials = -67700, // Invalid access credentials were encountered.
+	InvalidRecord = -67701, // An invalid record was encountered.
+	InvalidACL = -67702, // An invalid ACL was encountered.
+	InvalidSampleValue = -67703, // An invalid sample value was encountered.
+	IncompatibleVersion = -67704, // An incompatible version was encountered.
+	PrivilegeNotGranted = -67705, // The privilege was not granted.
+	InvalidScope = -67706, // An invalid scope was encountered.
+	PVCAlreadyConfigured = -67707, // The PVC is already configured.
+	InvalidPVC = -67708, // An invalid PVC was encountered.
+	EMMLoadFailed = -67709, // The EMM load has failed.
+	EMMUnloadFailed = -67710, // The EMM unload has failed.
+	AddinLoadFailed = -67711, // The add-in load operation has failed.
+	InvalidKeyRef = -67712, // An invalid key was encountered.
+	InvalidKeyHierarchy = -67713, // An invalid key hierarchy was encountered.
+	AddinUnloadFailed = -67714, // The add-in unload operation has failed.
+	LibraryReferenceNotFound = -67715, // A library reference was not found.
+	InvalidAddinFunctionTable = -67716, // An invalid add-in function table was encountered.
+	InvalidServiceMask = -67717, // An invalid service mask was encountered.
+	ModuleNotLoaded = -67718, // A module was not loaded.
+	InvalidSubServiceID = -67719, // An invalid subservice ID was encountered.
+	AttributeNotInContext = -67720, // An attribute was not in the context.
+	ModuleManagerInitializeFailed = -67721, // A module failed to initialize.
+	ModuleManagerNotFound = -67722, // A module was not found.
+	EventNotificationCallbackNotFound = -67723, // An event notification callback was not found.
+	InputLengthError = -67724, // An input length error was encountered.
+	OutputLengthError = -67725, // An output length error was encountered.
+	PrivilegeNotSupported = -67726, // The privilege is not supported.
+	DeviceError = -67727, // A device error was encountered.
+	AttachHandleBusy = -67728, // The CSP handle was busy.
+	NotLoggedIn = -67729, // You are not logged in.
+	AlgorithmMismatch = -67730, // An algorithm mismatch was encountered.
+	KeyUsageIncorrect = -67731, // The key usage is incorrect.
+	KeyBlobTypeIncorrect = -67732, // The key blob type is incorrect.
+	KeyHeaderInconsistent = -67733, // The key header is inconsistent.
+	UnsupportedKeyFormat = -67734, // The key header format is not supported.
+	UnsupportedKeySize = -67735, // The key size is not supported.
+	InvalidKeyUsageMask = -67736, // The key usage mask is not valid.
+	UnsupportedKeyUsageMask = -67737, // The key usage mask is not supported.
+	InvalidKeyAttributeMask = -67738, // The key attribute mask is not valid.
+	UnsupportedKeyAttributeMask = -67739, // The key attribute mask is not supported.
+	InvalidKeyLabel = -67740, // The key label is not valid.
+	UnsupportedKeyLabel = -67741, // The key label is not supported.
+	InvalidKeyFormat = -67742, // The key format is not valid.
+	UnsupportedVectorOfBuffers = -67743, // The vector of buffers is not supported.
+	InvalidInputVector = -67744, // The input vector is not valid.
+	InvalidOutputVector = -67745, // The output vector is not valid.
+	InvalidContext = -67746, // An invalid context was encountered.
+	InvalidAlgorithm = -67747, // An invalid algorithm was encountered.
+	InvalidAttributeKey = -67748, // A key attribute was not valid.
+	MissingAttributeKey = -67749, // A key attribute was missing.
+	InvalidAttributeInitVector = -67750, // An init vector attribute was not valid.
+	MissingAttributeInitVector = -67751, // An init vector attribute was missing.
+	InvalidAttributeSalt = -67752, // A salt attribute was not valid.
+	MissingAttributeSalt = -67753, // A salt attribute was missing.
+	InvalidAttributePadding = -67754, // A padding attribute was not valid.
+	MissingAttributePadding = -67755, // A padding attribute was missing.
+	InvalidAttributeRandom = -67756, // A random number attribute was not valid.
+	MissingAttributeRandom = -67757, // A random number attribute was missing.
+	InvalidAttributeSeed = -67758, // A seed attribute was not valid.
+	MissingAttributeSeed = -67759, // A seed attribute was missing.
+	InvalidAttributePassphrase = -67760, // A passphrase attribute was not valid.
+	MissingAttributePassphrase = -67761, // A passphrase attribute was missing.
+	InvalidAttributeKeyLength = -67762, // A key length attribute was not valid.
+	MissingAttributeKeyLength = -67763, // A key length attribute was missing.
+	InvalidAttributeBlockSize = -67764, // A block size attribute was not valid.
+	MissingAttributeBlockSize = -67765, // A block size attribute was missing.
+	InvalidAttributeOutputSize = -67766, // An output size attribute was not valid.
+	MissingAttributeOutputSize = -67767, // An output size attribute was missing.
+	InvalidAttributeRounds = -67768, // The number of rounds attribute was not valid.
+	MissingAttributeRounds = -67769, // The number of rounds attribute was missing.
+	InvalidAlgorithmParms = -67770, // An algorithm parameters attribute was not valid.
+	MissingAlgorithmParms = -67771, // An algorithm parameters attribute was missing.
+	InvalidAttributeLabel = -67772, // A label attribute was not valid.
+	MissingAttributeLabel = -67773, // A label attribute was missing.
+	InvalidAttributeKeyType = -67774, // A key type attribute was not valid.
+	MissingAttributeKeyType = -67775, // A key type attribute was missing.
+	InvalidAttributeMode = -67776, // A mode attribute was not valid.
+	MissingAttributeMode = -67777, // A mode attribute was missing.
+	InvalidAttributeEffectiveBits = -67778, // An effective bits attribute was not valid.
+	MissingAttributeEffectiveBits = -67779, // An effective bits attribute was missing.
+	InvalidAttributeStartDate = -67780, // A start date attribute was not valid.
+	MissingAttributeStartDate = -67781, // A start date attribute was missing.
+	InvalidAttributeEndDate = -67782, // An end date attribute was not valid.
+	MissingAttributeEndDate = -67783, // An end date attribute was missing.
+	InvalidAttributeVersion = -67784, // A version attribute was not valid.
+	MissingAttributeVersion = -67785, // A version attribute was missing.
+	InvalidAttributePrime = -67786, // A prime attribute was not valid.
+	MissingAttributePrime = -67787, // A prime attribute was missing.
+	InvalidAttributeBase = -67788, // A base attribute was not valid.
+	MissingAttributeBase = -67789, // A base attribute was missing.
+	InvalidAttributeSubprime = -67790, // A subprime attribute was not valid.
+	MissingAttributeSubprime = -67791, // A subprime attribute was missing.
+	InvalidAttributeIterationCount = -67792, // An iteration count attribute was not valid.
+	MissingAttributeIterationCount = -67793, // An iteration count attribute was missing.
+	InvalidAttributeDLDBHandle = -67794, // A database handle attribute was not valid.
+	MissingAttributeDLDBHandle = -67795, // A database handle attribute was missing.
+	InvalidAttributeAccessCredentials = -67796, // An access credentials attribute was not valid.
+	MissingAttributeAccessCredentials = -67797, // An access credentials attribute was missing.
+	InvalidAttributePublicKeyFormat = -67798, // A public key format attribute was not valid.
+	MissingAttributePublicKeyFormat = -67799, // A public key format attribute was missing.
+	InvalidAttributePrivateKeyFormat = -67800, // A private key format attribute was not valid.
+	MissingAttributePrivateKeyFormat = -67801, // A private key format attribute was missing.
+	InvalidAttributeSymmetricKeyFormat = -67802, // A symmetric key format attribute was not valid.
+	MissingAttributeSymmetricKeyFormat = -67803, // A symmetric key format attribute was missing.
+	InvalidAttributeWrappedKeyFormat = -67804, // A wrapped key format attribute was not valid.
+	MissingAttributeWrappedKeyFormat = -67805, // A wrapped key format attribute was missing.
+	StagedOperationInProgress = -67806, // A staged operation is in progress.
+	StagedOperationNotStarted = -67807, // A staged operation was not started.
+	VerifyFailed = -67808, // A cryptographic verification failure has occurred.
+	QuerySizeUnknown = -67809, // The query size is unknown.
+	BlockSizeMismatch = -67810, // A block size mismatch occurred.
+	PublicKeyInconsistent = -67811, // The public key was inconsistent.
+	DeviceVerifyFailed = -67812, // A device verification failure has occurred.
+	InvalidLoginName = -67813, // An invalid login name was detected.
+	AlreadyLoggedIn = -67814, // The user is already logged in.
+	InvalidDigestAlgorithm = -67815, // An invalid digest algorithm was detected.
+	InvalidCRLGroup = -67816, // An invalid CRL group was detected.
+	CertificateCannotOperate = -67817, // The certificate cannot operate.
+	CertificateExpired = -67818, // An expired certificate was detected.
+	CertificateNotValidYet = -67819, // The certificate is not yet valid.
+	CertificateRevoked = -67820, // The certificate was revoked.
+	CertificateSuspended = -67821, // The certificate was suspended.
+	InsufficientCredentials = -67822, // Insufficient credentials were detected.
+	InvalidAction = -67823, // The action was not valid.
+	InvalidAuthority = -67824, // The authority was not valid.
+	VerifyActionFailed = -67825, // A verify action has failed.
+	InvalidCertAuthority = -67826, // The certificate authority was not valid.
+	InvalidCRLAuthority = -67827, // The CRL authority was not valid.
+	InvalidCRLEncoding = -67828, // The CRL encoding was not valid.
+	InvalidCRLType = -67829, // The CRL type was not valid.
+	InvalidCRL = -67830, // The CRL was not valid.
+	InvalidFormType = -67831, // The form type was not valid.
+	InvalidID = -67832, // The ID was not valid.
+	InvalidIdentifier = -67833, // The identifier was not valid.
+	InvalidIndex = -67834, // The index was not valid.
+	InvalidPolicyIdentifiers = -67835, // The policy identifiers are not valid.
+	InvalidTimeString = -67836, // The time specified was not valid.
+	InvalidReason = -67837, // The trust policy reason was not valid.
+	InvalidRequestInputs = -67838, // The request inputs are not valid.
+	InvalidResponseVector = -67839, // The response vector was not valid.
+	InvalidStopOnPolicy = -67840, // The stop-on policy was not valid.
+	InvalidTuple = -67841, // The tuple was not valid.
+	MultipleValuesUnsupported = -67842, // Multiple values are not supported.
+	NotTrusted = -67843, // The certificate was not trusted.
+	NoDefaultAuthority = -67844, // No default authority was detected.
+	RejectedForm = -67845, // The trust policy had a rejected form.
+	RequestLost = -67846, // The request was lost.
+	RequestRejected = -67847, // The request was rejected.
+	UnsupportedAddressType = -67848, // The address type is not supported.
+	UnsupportedService = -67849, // The service is not supported.
+	InvalidTupleGroup = -67850, // The tuple group was not valid.
+	InvalidBaseACLs = -67851, // The base ACLs are not valid.
+	InvalidTupleCredentials = -67852, // The tuple credentials are not valid.
+	InvalidEncoding = -67853, // The encoding was not valid.
+	InvalidValidityPeriod = -67854, // The validity period was not valid.
+	InvalidRequestor = -67855, // The requestor was not valid.
+	RequestDescriptor = -67856, // The request descriptor was not valid.
+	InvalidBundleInfo = -67857, // The bundle information was not valid.
+	InvalidCRLIndex = -67858, // The CRL index was not valid.
+	NoFieldValues = -67859, // No field values were detected.
+	UnsupportedFieldFormat = -67860, // The field format is not supported.
+	UnsupportedIndexInfo = -67861, // The index information is not supported.
+	UnsupportedLocality = -67862, // The locality is not supported.
+	UnsupportedNumAttributes = -67863, // The number of attributes is not supported.
+	UnsupportedNumIndexes = -67864, // The number of indexes is not supported.
+	UnsupportedNumRecordTypes = -67865, // The number of record types is not supported.
+	FieldSpecifiedMultiple = -67866, // Too many fields were specified.
+	IncompatibleFieldFormat = -67867, // The field format was incompatible.
+	InvalidParsingModule = -67868, // The parsing module was not valid.
+	DatabaseLocked = -67869, // The database is locked.
+	DatastoreIsOpen = -67870, // The data store is open.
+	MissingValue = -67871, // A missing value was detected.
+	UnsupportedQueryLimits = -67872, // The query limits are not supported.
+	UnsupportedNumSelectionPreds = -67873, // The number of selection predicates is not supported.
+	UnsupportedOperator = -67874, // The operator is not supported.
+	InvalidDBLocation = -67875, // The database location is not valid.
+	InvalidAccessRequest = -67876, // The access request is not valid.
+	InvalidIndexInfo = -67877, // The index information is not valid.
+	InvalidNewOwner = -67878, // The new owner is not valid.
+	InvalidModifyMode = -67879, // The modify mode is not valid.
+	MissingRequiredExtension = -67880, // A required certificate extension is missing.
+	ExtendedKeyUsageNotCritical = -67881, // The extended key usage extension was not marked critical.
+	TimestampMissing = -67882, // A timestamp was expected but was not found.
+	TimestampInvalid = -67883, // The timestamp was not valid.
+	TimestampNotTrusted = -67884, // The timestamp was not trusted.
+	TimestampServiceNotAvailable = -67885, // The timestamp service is not available.
+	TimestampBadAlg = -67886, // An unrecognized or unsupported Algorithm Identifier in timestamp.
+	TimestampBadRequest = -67887, // The timestamp transaction is not permitted or supported.
+	TimestampBadDataFormat = -67888, // The timestamp data submitted has the wrong format.
+	TimestampTimeNotAvailable = -67889, // The time source for the Timestamp Authority is not available.
+	TimestampUnacceptedPolicy = -67890, // The requested policy is not supported by the Timestamp Authority.
+	TimestampUnacceptedExtension = -67891, // The requested extension is not supported by the Timestamp Authority.
+	TimestampAddInfoNotAvailable = -67892, // The additional information requested is not available.
+	TimestampSystemFailure = -67893, // The timestamp request cannot be handled due to system failure.
+	SigningTimeMissing = -67894, // A signing time was expected but was not found.
+	TimestampRejection = -67895, // A timestamp transaction was rejected.
+	TimestampWaiting = -67896, // A timestamp transaction is waiting.
+	TimestampRevocationWarning = -67897, // A timestamp authority revocation warning was issued.
+	TimestampRevocationNotification = -67898, // A timestamp authority revocation notification was issued.
+	CertificatePolicyNotAllowed = -67899, // The requested policy is not allowed for this certificate.
+	CertificateNameNotAllowed = -67900, // The requested name is not allowed for this certificate.
+	CertificateValidityPeriodTooLong = -67901, // The validity period in the certificate exceeds the maximum allowed.
+	CertificateIsCA = -67902, // The verified certificate is a CA rather than an end-entity.
+	CertificateDuplicateExtension = -67903, // The certificate contains multiple extensions with the same extension ID.
+}

+ 19 - 0
core/sys/darwin/Security/SecRandom.odin

@@ -0,0 +1,19 @@
+package Security
+
+import CF "core:sys/darwin/CoreFoundation"
+
+foreign import Security "system:Security.framework"
+
+// A reference to a random number generator.
+RandomRef :: distinct rawptr
+
+@(link_prefix="Sec", default_calling_convention="c")
+foreign Security {
+	// Default random ref for /dev/random. Synonym for nil.
+	@(link_name="kSecRandomDefault") kSecRandomDefault: RandomRef
+
+	// Generates an array of cryptographically secure random bytes.
+	RandomCopyBytes :: proc(rnd: RandomRef = kSecRandomDefault, count: uint, bytes: [^]byte) -> errSec ---
+
+	CopyErrorMessageString :: proc(status: errSec, reserved: rawptr = nil) -> CF.String ---
+}
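A minimal usage sketch for the new binding (hedged: the import path `core:sys/darwin/Security`, the `.Success` member of `errSec`, and the buffer size are assumptions for illustration, not part of this diff):

	package random_example

	import "core:fmt"
	import Security "core:sys/darwin/Security"

	main :: proc() {
		// Fill a stack buffer with cryptographically secure random bytes
		// using the default generator (kSecRandomDefault / nil).
		buf: [32]byte
		status := Security.RandomCopyBytes(count = len(buf), bytes = raw_data(buf[:]))
		if status != .Success { // assumes errSec defines Success = 0
			fmt.eprintln("SecRandomCopyBytes failed:", status)
			return
		}
		fmt.println(buf)
	}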

+ 0 - 98
core/sys/darwin/core_foundation.odin

@@ -1,98 +0,0 @@
-//+build darwin
-package darwin
-
-import "base:runtime"
-
-foreign import core_foundation "system:CoreFoundation.framework"
-
-CFTypeRef   :: distinct rawptr
-
-CFStringRef :: distinct CFTypeRef
-
-CFIndex :: int
-
-CFRange :: struct {
-	location: CFIndex,
-	length:   CFIndex,
-}
-
-CFStringEncoding :: enum u32 {
-	ASCII             = 1,
-	NEXTSTEP          = 2,
-	JapaneseEUC       = 3,
-	UTF8              = 4,
-	ISOLatin1         = 5,
-	Symbol            = 6,
-	NonLossyASCII     = 7,
-	ShiftJIS          = 8,
-	ISOLatin2         = 9,
-	Unicode           = 10,
-	WindowsCP1251     = 11,
-	WindowsCP1252     = 12,
-	WindowsCP1253     = 13,
-	WindowsCP1254     = 14,
-	WindowsCP1250     = 15,
-	ISO2022JP         = 21,
-	MacOSRoman        = 30,
-
-	UTF16             = Unicode,
-
-	UTF16BigEndian    = 0x90000100,
-	UTF16LittleEndian = 0x94000100,
-
-	UTF32             = 0x8c000100,
-	UTF32BigEndian    = 0x98000100,
-	UTF32LittleEndian = 0x9c000100,
-}
-
-foreign core_foundation {
-	// Copies the character contents of a string to a local C string buffer after converting the characters to a given encoding.
-	CFStringGetCString :: proc(theString: CFStringRef, buffer: [^]byte, bufferSize: CFIndex, encoding: CFStringEncoding) -> Bool ---
-	
-	// Returns the number (in terms of UTF-16 code pairs) of Unicode characters in a string.
-	CFStringGetLength :: proc(theString: CFStringRef) -> CFIndex ---
-	
-	// Returns the maximum number of bytes a string of a specified length (in Unicode characters) will take up if encoded in a specified encoding.
-	CFStringGetMaximumSizeForEncoding :: proc(length: CFIndex, encoding: CFStringEncoding) -> CFIndex ---
-	
-	// Fetches a range of the characters from a string into a byte buffer after converting the characters to a specified encoding.
-	CFStringGetBytes :: proc(
-		thestring: CFStringRef,
-		range: CFRange,
-		encoding: CFStringEncoding,
-		lossByte: u8,
-		isExternalRepresentation: Bool,
-		buffer: [^]byte,
-		maxBufLen: CFIndex,
-		usedBufLen: ^CFIndex,
-	) -> CFIndex ---
-	
-	// Releases a Core Foundation object.
-	@(link_name="CFRelease")
-	_CFRelease :: proc(cf: CFTypeRef) ---
-}
-
-// Releases a Core Foundation object.
-CFRelease :: proc {
-	CFReleaseString,
-}
-
-// Releases a Core Foundation string.
-CFReleaseString :: #force_inline proc(theString: CFStringRef) {
-	_CFRelease(CFTypeRef(theString))
-}
-
-CFStringCopyToOdinString :: proc(theString: CFStringRef, allocator := context.allocator) -> (str: string, ok: bool) #optional_ok {
-	length := CFStringGetLength(theString)
-	max    := CFStringGetMaximumSizeForEncoding(length, .UTF8)
-
-	buf, err := make([]byte, max, allocator)
-	if err != nil { return }
-	
-	raw_str := runtime.Raw_String{
-		data = raw_data(buf),
-	}
-	CFStringGetBytes(theString, {0, length}, .UTF8, 0, false, raw_data(buf), max, &raw_str.len)
-
-	return transmute(string)raw_str, true
-}

+ 0 - 26
core/sys/darwin/security.odin

@@ -1,26 +0,0 @@
-//+build darwin
-package darwin
-
-foreign import security "system:Security.framework"
-
-// A reference to a random number generator.
-SecRandomRef :: distinct rawptr
-
-OSStatus :: distinct i32
-
-errSec :: enum OSStatus {
-	Success       = 0,  // No error.
-	Unimplemented = -4, // Function or operation not implemented.
-
-	// Many more...
-}
-
-foreign security {
-	// Synonym for nil, uses a cryptographically secure random number generator.
-	kSecRandomDefault: SecRandomRef
-	
-	// Generates an array of cryptographically secure random bytes.
-	SecRandomCopyBytes :: proc(rnd: SecRandomRef = kSecRandomDefault, count: uint, bytes: [^]byte) -> errSec ---
-
-	SecCopyErrorMessageString :: proc(status: errSec, reserved: rawptr = nil) -> CFStringRef ---
-}

+ 3 - 3
core/sys/darwin/xnu_system_call_wrappers.odin

@@ -337,7 +337,7 @@ syscall_ftruncate :: #force_inline proc "contextless" (fd: c.int, length: off_t)
 	return cast(c.int)intrinsics.syscall(unix_offset_syscall(.ftruncate), uintptr(fd), uintptr(length))
 }
 
-syscall_sysctl :: #force_inline proc "contextless" (name: ^c.int, namelen: c.uint, oldp: rawptr, oldlenp: ^i64, newp: ^i8, newlen: i64) -> c.int {
+syscall_sysctl :: #force_inline proc "contextless" (name: [^]c.int, namelen: c.size_t, oldp: rawptr, oldlenp: ^c.size_t, newp: rawptr, newlen: c.size_t) -> c.int {
 	return cast(c.int)intrinsics.syscall(unix_offset_syscall(.sysctl), uintptr(name), uintptr(namelen), uintptr(oldp), uintptr(oldlenp), uintptr(newp), uintptr(newlen))
 }
 
@@ -390,8 +390,8 @@ syscall_adjtime :: #force_inline proc "contextless" (delta: ^timeval, old_delta:
 	return cast(c.int)intrinsics.syscall(unix_offset_syscall(.adjtime), uintptr(delta), uintptr(old_delta))
 }
 
-syscall_sysctlbyname :: #force_inline proc "contextless" (name: cstring, oldp: rawptr, oldlenp: ^i64, newp: rawptr, newlen: i64) -> c.int {
-	return cast(c.int)intrinsics.syscall(unix_offset_syscall(.sysctlbyname), transmute(uintptr)name, uintptr(oldp), uintptr(oldlenp), uintptr(newp), uintptr(newlen))
+syscall_sysctlbyname :: #force_inline proc "contextless" (name: string, oldp: rawptr, oldlenp: ^c.size_t, newp: rawptr, newlen: c.size_t) -> c.int {
+	return cast(c.int)intrinsics.syscall(unix_offset_syscall(.sysctlbyname), uintptr(raw_data(name)), uintptr(len(name)), uintptr(oldp), uintptr(oldlenp), uintptr(newp), uintptr(newlen))
 }
 
 syscall_proc_info :: #force_inline proc "contextless" (num: c.int, pid: u32, flavor: c.int, arg: u64, buffer: rawptr, buffer_size: c.int) -> c.int {
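For illustration, a hedged sketch of how the updated string-based `syscall_sysctlbyname` signature might be exercised (the `hw.memsize` key, the helper name, and the import paths are assumptions, not part of this change):

	package example

	import "core:c"
	import "core:sys/darwin"

	// Hypothetical helper: query total physical RAM in bytes via the wrapper.
	total_ram_bytes :: proc "contextless" () -> (bytes: u64, ok: bool) {
		size := c.size_t(size_of(bytes))
		ok = darwin.syscall_sysctlbyname("hw.memsize", &bytes, &size, nil, 0) == 0
		return
	}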

+ 59 - 15
core/sys/info/cpu_arm.odin

@@ -1,26 +1,70 @@
 //+build arm32, arm64
 package sysinfo
 
-// TODO: Set up an enum with the ARM equivalent of the above.
-CPU_Feature :: enum u64 {}
+import "core:sys/unix"
 
-cpu_features: Maybe(CPU_Feature)
-cpu_name:     Maybe(string)
+_ :: unix
 
-@(init, private)
-init_cpu_features :: proc "c" () {
+CPU_Feature :: enum u64 {
+	// Advanced SIMD & floating-point capabilities:
+	asimd,         // General support for Advanced SIMD instructions/neon.
+	floatingpoint, // General support for floating-point instructions.
+	asimdhp,       // Advanced SIMD half-precision conversion instructions.
+	bf16,          // Storage and arithmetic instructions of the Brain Floating Point (BFloat16) data type.
+	fcma,          // Floating-point complex number instructions.
+	fhm,           // Floating-point half-precision multiplication instructions.
+	fp16,          // General half-precision floating-point data processing instructions.
+	frint,         // Floating-point to integral valued floating-point number rounding instructions.
+	i8mm,          // Advanced SIMD int8 matrix multiplication instructions.
+	jscvt,         // JavaScript conversion instruction.
+	rdm,           // Advanced SIMD rounding double multiply accumulate instructions.
+
+	flagm,  // Condition flag manipulation instructions.
+	flagm2, // Enhancements to condition flag manipulation instructions.
+	crc32,  // CRC32 instructions.
+
+	lse,    // Atomic instructions to support large systems.
+	lse2,   // Changes to single-copy atomicity and alignment requirements for loads and stores for large systems.
+	lrcpc,  // Load-acquire Release Consistency processor consistent (RCpc) instructions.
+	lrcpc2, // Load-acquire Release Consistency processor consistent (RCpc) instructions version 2.
+
+	aes,
+	pmull,
+	sha1,
+	sha256,
+	sha512,
+	sha3,
+
+	sb,   // Barrier instruction to control speculation.
+	ssbs, // Instructions to control speculation of loads and stores.
 }
 
+CPU_Features :: distinct bit_set[CPU_Feature; u64]
+
+cpu_features: Maybe(CPU_Features)
+cpu_name: Maybe(string)
+
 @(private)
-_cpu_name_buf: [72]u8
+cpu_name_buf: [128]byte
 
 @(init, private)
-init_cpu_name :: proc "c" () {
-	when ODIN_ARCH == .arm32 {
-		copy(_cpu_name_buf[:], "ARM")
-		cpu_name = string(_cpu_name_buf[:3])
-	} else {
-		copy(_cpu_name_buf[:], "ARM64")
-		cpu_name = string(_cpu_name_buf[:5])
+init_cpu_name :: proc "contextless" () {
+	generic := true
+
+	when ODIN_OS == .Darwin {
+		if unix.sysctlbyname("machdep.cpu.brand_string", &cpu_name_buf) {
+			cpu_name = string(cstring(rawptr(&cpu_name_buf)))
+			generic = false
+		}
 	}
-}
+
+	if generic {
+		when ODIN_ARCH == .arm64 {
+			copy(cpu_name_buf[:], "ARM64")
+			cpu_name = string(cpu_name_buf[:len("ARM64")])
+		} else {
+			copy(cpu_name_buf[:], "ARM")
+			cpu_name = string(cpu_name_buf[:len("ARM")])
+		}
+	}
+}
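A brief, hedged sketch of how downstream code might test the populated `cpu_features` bit_set (the unwrap style and the chosen flags are illustrative only):

	package example

	import si "core:sys/info"

	select_crypto_path :: proc() {
		// cpu_features is a Maybe; it remains nil where detection is unsupported.
		if features, ok := si.cpu_features.?; ok {
			if .aes in features && .pmull in features {
				// Take the hardware-accelerated AES/PMULL path.
			}
		}
	}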

+ 98 - 0
core/sys/info/cpu_darwin_arm64.odin

@@ -0,0 +1,98 @@
+package sysinfo
+
+import "core:sys/unix"
+
+@(init, private)
+init_cpu_features :: proc "contextless" () {
+	@(static) features: CPU_Features
+	defer cpu_features = features
+
+	try_set :: proc "contextless" (name: string, feature: CPU_Feature) -> (ok: bool) {
+		support: b32
+		if ok = unix.sysctlbyname(name, &support); ok && support {
+			features += { feature }
+		}
+		return
+	}
+
+	// Docs from Apple: https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
+	// Features from there that do not have (or I didn't find) an equivalent on Linux are commented out below.
+
+	// Advanced SIMD & floating-point capabilities:
+	{
+		if !try_set("hw.optional.AdvSIMD", .asimd) {
+			try_set("hw.optional.neon", .asimd)
+		}
+
+		try_set("hw.optional.floatingpoint", .floatingpoint)
+
+		if !try_set("hw.optional.AdvSIMD_HPFPCvt", .asimdhp) {
+			try_set("hw.optional.neon_hpfp", .asimdhp)
+		}
+
+		try_set("hw.optional.arm.FEAT_BF16", .bf16)
+		// try_set("hw.optional.arm.FEAT_DotProd", .dotprod)
+
+		if !try_set("hw.optional.arm.FEAT_FCMA", .fcma) {
+			try_set("hw.optional.armv8_3_compnum", .fcma)
+		}
+
+		if !try_set("hw.optional.arm.FEAT_FHM", .fhm) {
+			try_set("hw.optional.armv8_2_fhm", .fhm)
+		}
+
+		if !try_set("hw.optional.arm.FEAT_FP16", .fp16) {
+			try_set("hw.optional.neon_fp16", .fp16)
+		}
+
+		try_set("hw.optional.arm.FEAT_FRINTTS", .frint)
+		try_set("hw.optional.arm.FEAT_I8MM", .i8mm)
+		try_set("hw.optional.arm.FEAT_JSCVT", .jscvt)
+		try_set("hw.optional.arm.FEAT_RDM", .rdm)
+	}
+
+	// Integer capabilities:
+	{
+		try_set("hw.optional.arm.FEAT_FlagM", .flagm)
+		try_set("hw.optional.arm.FEAT_FlagM2", .flagm2)
+		try_set("hw.optional.armv8_crc32", .crc32)
+	}
+
+	// Atomic and memory ordering instruction capabilities:
+	{
+		try_set("hw.optional.arm.FEAT_LRCPC", .lrcpc)
+		try_set("hw.optional.arm.FEAT_LRCPC2", .lrcpc2)
+
+		if !try_set("hw.optional.arm.FEAT_LSE", .lse) {
+			try_set("hw.optional.armv8_1_atomics", .lse)
+		}
+
+		// try_set("hw.optional.arm.FEAT_LSE2", .lse2)
+	}
+
+	// Encryption capabilities:
+	{
+		try_set("hw.optional.arm.FEAT_AES", .aes)
+		try_set("hw.optional.arm.FEAT_PMULL", .pmull)
+		try_set("hw.optional.arm.FEAT_SHA1", .sha1)
+		try_set("hw.optional.arm.FEAT_SHA256", .sha256)
+
+		if !try_set("hw.optional.arm.FEAT_SHA512", .sha512) {
+			try_set("hw.optional.armv8_2_sha512", .sha512)
+		}
+
+		if !try_set("hw.optional.arm.FEAT_SHA3", .sha3) {
+			try_set("hw.optional.armv8_2_sha3", .sha3)
+		}
+	}
+
+	// General capabilities:
+	{
+		// try_set("hw.optional.arm.FEAT_BTI", .bti)
+		// try_set("hw.optional.arm.FEAT_DPB", .dpb)
+		// try_set("hw.optional.arm.FEAT_DPB2", .dpb2)
+		// try_set("hw.optional.arm.FEAT_ECV", .ecv)
+		try_set("hw.optional.arm.FEAT_SB", .sb)
+		try_set("hw.optional.arm.FEAT_SSBS", .ssbs)
+	}
+}

+ 65 - 0
core/sys/info/cpu_linux_arm.odin

@@ -0,0 +1,65 @@
+//+build arm32, arm64
+//+build linux
+package sysinfo
+
+import "core:sys/linux"
+import "core:strings"
+
+@(init, private)
+init_cpu_features :: proc() {
+	fd, err := linux.open("/proc/cpuinfo", {})
+	if err != .NONE { return }
+	defer linux.close(fd)
+
+	// This is probably enough right?
+	buf: [4096]byte
+	n, rerr := linux.read(fd, buf[:])
+	if rerr != .NONE || n == 0 { return }
+
+	features: CPU_Features
+	defer cpu_features = features
+
+	str := string(buf[:n])
+	for line in strings.split_lines_iterator(&str) {
+		key, _, value := strings.partition(line, ":")
+		key   = strings.trim_space(key)
+		value = strings.trim_space(value)
+
+		if key != "Features" { continue }
+
+		for feature in strings.split_by_byte_iterator(&value, ' ') {
+			switch feature {
+			case "asimd", "neon": features += { .asimd }
+			case "fp":            features += { .floatingpoint }
+			case "asimdhp":       features += { .asimdhp }
+			case "asimdbf16":     features += { .bf16 }
+			case "fcma":          features += { .fcma }
+			case "asimdfhm":      features += { .fhm }
+			case "fphp", "half":  features += { .fp16 }
+			case "frint":         features += { .frint }
+			case "i8mm":          features += { .i8mm }
+			case "jscvt":         features += { .jscvt }
+			case "asimdrdm":      features += { .rdm }
+
+			case "flagm":  features += { .flagm }
+			case "flagm2": features += { .flagm2 }
+			case "crc32":  features += { .crc32 }
+
+			case "atomics": features += { .lse }
+			case "lrcpc":   features += { .lrcpc }
+			case "ilrcpc":  features += { .lrcpc2 }
+
+			case "aes":    features += { .aes }
+			case "pmull":  features += { .pmull }
+			case "sha1":   features += { .sha1 }
+			case "sha2":   features += { .sha256 }
+			case "sha3":   features += { .sha3 }
+			case "sha512": features += { .sha512 }
+
+			case "sb":   features += { .sb }
+			case "ssbs": features += { .ssbs }
+			}
+		}
+		break
+	}
+}

+ 67 - 64
core/sys/info/doc.odin

@@ -1,78 +1,81 @@
 /*
-	Copyright 2022 Jeroen van Rijn <[email protected]>.
-	Made available under Odin's BSD-3 license.
+Copyright 2022 Jeroen van Rijn <[email protected]>.
+Made available under Odin's BSD-3 license.
 
-	Package `core:sys/info` gathers system information on:
-	Windows, Linux, macOS, FreeBSD & OpenBSD.
+Package `core:sys/info` gathers system information on:
+Windows, Linux, macOS, FreeBSD & OpenBSD.
 
-	Simply import the package and you'll have access to the OS version, RAM amount
-	and CPU information.
+Simply import the package and you'll have access to the OS version, RAM amount
+and CPU information.
 
-	On Windows, GPUs will also be enumerated using the registry.
+On Windows, GPUs will also be enumerated using the registry.
 
-	CPU feature flags can be tested against `cpu_features`, where applicable, e.g.
-	`if .aes in si.aes { ... }`
-*/
-//+build ignore
-package sysinfo
+CPU feature flags can be tested against `cpu_features`, where applicable, e.g.
+`if .aes in si.cpu_features.? { ... }`
+
+Example:
+
+	import "core:fmt"
+	import si "core:sys/info"
 
-import "core:fmt"
-import si "core:sys/info"
+	main :: proc() {
+		fmt.printfln("Odin:  %v",    ODIN_VERSION)
+		fmt.printfln("OS:    %v",    si.os_version.as_string)
+		fmt.printfln("OS:    %#v",   si.os_version)
+		fmt.printfln("CPU:   %v",    si.cpu_name)
+		fmt.printfln("RAM:   %#.1M", si.ram.total_ram)
 
-main :: proc() {
-	fmt.printf("Odin:  %v\n",     ODIN_VERSION)
-	fmt.printf("OS:    %v\n",     si.os_version.as_string)
-	fmt.printf("OS:    %#v\n",    si.os_version)
-	fmt.printf("CPU:   %v\n",     si.cpu_name)
-	fmt.printf("RAM:   %v MiB\n", si.ram.total_ram / 1024 / 1024)
+		// fmt.printfln("Features: %v",      si.cpu_features)
+		// fmt.printfln("MacOS version: %v", si.macos_version)
 
-	fmt.println()
-	for gpu, i in si.gpus {
-		fmt.printf("GPU #%v:\n", i)
-		fmt.printf("\tVendor: %v\n",     gpu.vendor_name)
-		fmt.printf("\tModel:  %v\n",     gpu.model_name)
-		fmt.printf("\tVRAM:   %v MiB\n", gpu.total_ram / 1024 / 1024)
+		fmt.println()
+		for gpu, i in si.gpus {
+			fmt.printfln("GPU #%v:", i)
+			fmt.printfln("\tVendor: %v",    gpu.vendor_name)
+			fmt.printfln("\tModel:  %v",    gpu.model_name)
+			fmt.printfln("\tVRAM:   %#.1M", gpu.total_ram)
+		}
 	}
-}
 
-/*
-	Example Windows output:
-		Odin:  dev-2022-09
-		OS:    Windows 10 Professional (version: 20H2), build: 19042.1466
-		OS:    OS_Version{
-			platform = "Windows",
-			major = 10,
-			minor = 0,
+- Example Windows output:
+
+	Odin:  dev-2022-09
+	OS:    Windows 10 Professional (version: 20H2), build: 19042.1466
+	OS:    OS_Version{
+		platform = "Windows",
+		major = 10,
+		minor = 0,
+		patch = 0,
+		build = [
+			19042,
+			1466,
+		],
+		version = "20H2",
+		as_string = "Windows 10 Professional (version: 20H2), build: 19042.1466",
+	}
+	CPU:   AMD Ryzen 7 1800X Eight-Core Processor
+	RAM:   64.0 GiB
+	GPU #0:
+		Vendor: Advanced Micro Devices, Inc.
+		Model:  Radeon RX Vega
+		VRAM:   8.0 GiB
+
+- Example macOS output:
+
+	ODIN: dev-2022-09
+	OS:   OS_Version{
+			platform = "MacOS",
+			major = 21,
+			minor = 5,
 			patch = 0,
 			build = [
-				19042,
-				1466,
+					0,
+					0,
 			],
-			version = "20H2",
-			as_string = "Windows 10 Professional (version: 20H2), build: 19042.1466",
-		}
-		CPU:   AMD Ryzen 7 1800X Eight-Core Processor
-		RAM:   65469 MiB
-
-		GPU #0:
-			Vendor: Advanced Micro Devices, Inc.
-			Model:  Radeon RX Vega
-			VRAM:   8176 MiB
-
-	Example macOS output:
-		ODIN: dev-2022-09
-		OS:   OS_Version{
-		        platform = "MacOS",
-		        major = 21,
-		        minor = 5,
-		        patch = 0,
-		        build = [
-		                0,
-		                0,
-		        ],
-		        version = "21F79",
-		        as_string = "macOS Monterey 12.4 (build 21F79, kernel 21.5.0)",
-		}
-		CPU:  Intel(R) Core(TM) i5-7360U CPU @ 2.30GHz
-		RAM:  8192 MiB
+			version = "21F79",
+			as_string = "macOS Monterey 12.4 (build 21F79, kernel 21.5.0)",
+	}
+	CPU:  Intel(R) Core(TM) i5-7360U CPU @ 2.30GHz
+	RAM:  8.0 GiB
 */
+package sysinfo

+ 9 - 5
core/sys/info/platform_openbsd.odin → core/sys/info/platform_bsd.odin

@@ -1,4 +1,4 @@
-// +build openbsd
+//+build openbsd, netbsd
 package sysinfo
 
 import sys "core:sys/unix"
@@ -11,12 +11,16 @@ version_string_buf: [1024]u8
 
 @(init, private)
 init_os_version :: proc () {
-	os_version.platform = .OpenBSD
+	when ODIN_OS == .NetBSD {
+		os_version.platform = .NetBSD
+	} else {
+		os_version.platform = .OpenBSD
+	}
 
 	kernel_version_buf: [1024]u8
 
 	b := strings.builder_from_bytes(version_string_buf[:])
-	// Retrieve kernel info using `sysctl`, e.g. OpenBSD
+	// Retrieve kernel info using `sysctl`, e.g. OpenBSD and NetBSD
 	mib := []i32{sys.CTL_KERN, sys.KERN_OSTYPE}
 	if !sys.sysctl(mib, &kernel_version_buf) {
 		return
@@ -61,7 +65,7 @@ init_os_version :: proc () {
 	os_version.as_string = strings.to_string(b)
 }
 
-@(init)
+@(init, private)
 init_ram :: proc() {
 	// Retrieve RAM info using `sysctl`
 	mib := []i32{sys.CTL_HW, sys.HW_PHYSMEM64}
@@ -69,4 +73,4 @@ init_ram :: proc() {
 	if sys.sysctl(mib, &mem_size) {
 		ram.total_ram = int(mem_size)
 	}
-}
+}

+ 3 - 2
core/sys/info/platform_darwin.odin

@@ -1,4 +1,3 @@
-// +build darwin
 package sysinfo
 
 import sys "core:sys/unix"
@@ -76,6 +75,8 @@ init_os_version :: proc () {
 	os_version.minor = rel.darwin.y
 	os_version.patch = rel.darwin.z
 
+	macos_version = transmute(Version)rel.release.version
+
 	strings.write_string(&b, rel.os_name)
 	if match == .Exact || match == .Nearest {
 		strings.write_rune(&b, ' ')
@@ -113,7 +114,7 @@ init_os_version :: proc () {
 	os_version.as_string = strings.to_string(b)
 }
 
-@(init)
+@(init, private)
 init_ram :: proc() {
 	// Retrieve RAM info using `sysctl`
 

Some files were not shown because too many files changed in this diff