
Merge branch 'master' into basic_egl

Colin Davidson · 1 year ago · commit deb8922181
100 changed files, with 2079 additions and 2188 deletions
  1. .github/workflows/ci.yml (+45 -4)
  2. .github/workflows/nightly.yml (+45 -2)
  3. .gitignore (+2 -1)
  4. base/builtin/builtin.odin (+0 -0)
  5. base/intrinsics/intrinsics.odin (+6 -0)
  6. base/runtime/core.odin (+19 -1)
  7. base/runtime/core_builtin.odin (+18 -36)
  8. base/runtime/core_builtin_soa.odin (+1 -1)
  9. base/runtime/default_allocators_arena.odin (+1 -1)
  10. base/runtime/default_allocators_general.odin (+12 -0)
  11. base/runtime/default_allocators_nil.odin (+0 -8)
  12. base/runtime/default_temporary_allocator.odin (+0 -0)
  13. base/runtime/docs.odin (+3 -2)
  14. base/runtime/dynamic_array_internal.odin (+0 -0)
  15. base/runtime/dynamic_map_internal.odin (+52 -1)
  16. base/runtime/entry_unix.odin (+1 -1)
  17. base/runtime/entry_unix_no_crt_amd64.asm (+0 -0)
  18. base/runtime/entry_unix_no_crt_darwin_arm64.asm (+0 -0)
  19. base/runtime/entry_unix_no_crt_i386.asm (+0 -0)
  20. base/runtime/entry_wasm.odin (+1 -1)
  21. base/runtime/entry_windows.odin (+1 -1)
  22. base/runtime/error_checks.odin (+0 -0)
  23. base/runtime/heap_allocator.odin (+110 -0)
  24. base/runtime/heap_allocator_other.odin (+15 -0)
  25. base/runtime/heap_allocator_unix.odin (+38 -0)
  26. base/runtime/heap_allocator_windows.odin (+39 -0)
  27. base/runtime/internal.odin (+26 -78)
  28. base/runtime/os_specific.odin (+7 -0)
  29. base/runtime/os_specific_bsd.odin (+22 -0)
  30. base/runtime/os_specific_darwin.odin (+15 -0)
  31. base/runtime/os_specific_freestanding.odin (+2 -1)
  32. base/runtime/os_specific_js.odin (+2 -1)
  33. base/runtime/os_specific_linux.odin (+24 -0)
  34. base/runtime/os_specific_wasi.odin (+2 -1)
  35. base/runtime/os_specific_windows.odin (+51 -0)
  36. base/runtime/print.odin (+20 -6)
  37. base/runtime/procs.odin (+0 -0)
  38. base/runtime/procs_darwin.odin (+1 -1)
  39. base/runtime/procs_js.odin (+0 -0)
  40. base/runtime/procs_wasm.odin (+0 -0)
  41. base/runtime/procs_windows_amd64.asm (+0 -0)
  42. base/runtime/procs_windows_amd64.odin (+0 -0)
  43. base/runtime/procs_windows_i386.odin (+0 -0)
  44. base/runtime/udivmod128.odin (+1 -1)
  45. core/bufio/scanner.odin (+1 -1)
  46. core/c/c.odin (+1 -1)
  47. core/c/libc/complex.odin (+1 -1)
  48. core/c/libc/math.odin (+1 -1)
  49. core/c/libc/stdarg.odin (+1 -1)
  50. core/c/libc/stdatomic.odin (+1 -1)
  51. core/c/libc/string.odin (+1 -1)
  52. core/compress/common.odin (+1 -1)
  53. core/compress/shoco/shoco.odin (+1 -1)
  54. core/container/bit_array/bit_array.odin (+1 -1)
  55. core/container/intrusive/list/intrusive_list.odin (+1 -1)
  56. core/container/lru/lru_cache.odin (+2 -2)
  57. core/container/priority_queue/priority_queue.odin (+1 -1)
  58. core/container/queue/queue.odin (+2 -2)
  59. core/container/small_array/small_array.odin (+2 -2)
  60. core/container/topological_sort/topological_sort.odin (+2 -2)
  61. core/crypto/README.md (+8 -70)
  62. core/crypto/_blake2/blake2.odin (+58 -39)
  63. core/crypto/_sha3/sha3.odin (+68 -43)
  64. core/crypto/blake2b/blake2b.odin (+32 -100)
  65. core/crypto/blake2s/blake2s.odin (+32 -100)
  66. core/crypto/hash/doc.odin (+62 -0)
  67. core/crypto/hash/hash.odin (+116 -0)
  68. core/crypto/hash/low_level.odin (+353 -0)
  69. core/crypto/hmac/hmac.odin (+162 -0)
  70. core/crypto/legacy/keccak/keccak.odin (+54 -336)
  71. core/crypto/legacy/md5/md5.odin (+50 -98)
  72. core/crypto/legacy/sha1/sha1.odin (+52 -99)
  73. core/crypto/poly1305/poly1305.odin (+0 -4)
  74. core/crypto/sha2/sha2.odin (+88 -425)
  75. core/crypto/sha3/sha3.odin (+56 -324)
  76. core/crypto/shake/shake.odin (+40 -178)
  77. core/crypto/sm3/sm3.odin (+47 -98)
  78. core/dynlib/lib.odin (+20 -27)
  79. core/dynlib/lib_windows.odin (+1 -1)
  80. core/encoding/endian/endian.odin (+1 -1)
  81. core/encoding/json/marshal.odin (+4 -1)
  82. core/encoding/json/unmarshal.odin (+1 -2)
  83. core/encoding/xml/xml_reader.odin (+2 -2)
  84. core/fmt/fmt.odin (+116 -54)
  85. core/fmt/fmt_os.odin (+1 -1)
  86. core/hash/crc32.odin (+1 -1)
  87. core/hash/hash.odin (+1 -1)
  88. core/hash/xxhash/common.odin (+2 -2)
  89. core/hash/xxhash/streaming.odin (+1 -1)
  90. core/hash/xxhash/xxhash_3.odin (+1 -1)
  91. core/hash/xxhash/xxhash_32.odin (+1 -1)
  92. core/hash/xxhash/xxhash_64.odin (+1 -1)
  93. core/image/common.odin (+1 -1)
  94. core/image/netpbm/netpbm.odin (+1 -1)
  95. core/image/png/helpers.odin (+1 -1)
  96. core/image/png/png.odin (+2 -2)
  97. core/io/io.odin (+1 -1)
  98. core/log/file_console_logger.odin (+1 -0)
  99. core/log/log.odin (+37 -1)
  100. core/log/log_allocator.odin (+1 -1)

+ 45 - 4
.github/workflows/ci.yml

@@ -3,6 +3,7 @@ on: [push, pull_request, workflow_dispatch]
 
 jobs:
   build_linux:
+    name: Ubuntu Build, Check, and Test
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v1
@@ -46,6 +47,9 @@ jobs:
       - name: Odin check examples/all for Linux i386
         run: ./odin check examples/all -vet -strict-style -target:linux_i386
         timeout-minutes: 10
+      - name: Odin check examples/all for Linux arm64
+        run: ./odin check examples/all -vet -strict-style -target:linux_arm64
+        timeout-minutes: 10
       - name: Odin check examples/all for FreeBSD amd64
         run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
         timeout-minutes: 10
@@ -53,6 +57,7 @@ jobs:
         run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
         timeout-minutes: 10
   build_macOS:
+    name: MacOS Build, Check, and Test
     runs-on: macos-latest
     steps:
       - uses: actions/checkout@v1
@@ -92,13 +97,49 @@ jobs:
           cd tests/internal
           make
         timeout-minutes: 10
-      - name: Odin check examples/all for Darwin arm64
-        run: ./odin check examples/all -vet -strict-style -target:darwin_arm64
+  build_macOS_arm:
+    name: MacOS ARM Build, Check, and Test
+    runs-on: macos-14 # This is an arm/m1 runner.
+    steps:
+      - uses: actions/checkout@v1
+      - name: Download LLVM, botan and setup PATH
+        run: |
+          brew install llvm@13 botan
+          echo "/opt/homebrew/opt/llvm@13/bin" >> $GITHUB_PATH
+          TMP_PATH=$(xcrun --show-sdk-path)/user/include
+          echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
+      - name: build odin
+        run: ./build_odin.sh release
+      - name: Odin version
+        run: ./odin version
+        timeout-minutes: 1
+      - name: Odin report
+        run: ./odin report
+        timeout-minutes: 1
+      - name: Odin check
+        run: ./odin check examples/demo -vet
+        timeout-minutes: 10
+      - name: Odin run
+        run: ./odin run examples/demo
         timeout-minutes: 10
-      - name: Odin check examples/all for Linux arm64
-        run: ./odin check examples/all -vet -strict-style -target:linux_arm64
+      - name: Odin run -debug
+        run: ./odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        run: ./odin check examples/all -strict-style
+        timeout-minutes: 10
+      - name: Core library tests
+        run: |
+          cd tests/core
+          make
+        timeout-minutes: 10
+      - name: Odin internals tests
+        run: |
+          cd tests/internal
+          make
         timeout-minutes: 10
   build_windows:
+    name: Windows Build, Check, and Test
     runs-on: windows-2022
     steps:
       - uses: actions/checkout@v1

+ 45 - 2
.github/workflows/nightly.yml

@@ -7,6 +7,7 @@ on:
 
 jobs:
   build_windows:
+    name: Windows Build
     if: github.repository == 'odin-lang/Odin'
     runs-on: windows-2022
     steps:
@@ -29,6 +30,7 @@ jobs:
           cp LICENSE dist
           cp LLVM-C.dll dist
           cp -r shared dist
+          cp -r base dist
           cp -r core dist
           cp -r vendor dist
           cp -r bin dist
@@ -39,6 +41,7 @@ jobs:
           name: windows_artifacts
           path: dist
   build_ubuntu:
+    name: Ubuntu Build
     if: github.repository == 'odin-lang/Odin'
     runs-on: ubuntu-latest
     steps:
@@ -56,6 +59,7 @@ jobs:
           cp LICENSE dist
           cp libLLVM* dist
           cp -r shared dist
+          cp -r base dist
           cp -r core dist
           cp -r vendor dist
           cp -r examples dist
@@ -65,8 +69,9 @@ jobs:
           name: ubuntu_artifacts
           path: dist
   build_macos:
+    name: MacOS Build
     if: github.repository == 'odin-lang/Odin'
-    runs-on: macOS-latest
+    runs-on: macos-latest
     steps:
       - uses: actions/checkout@v1
       - name: Download LLVM and setup PATH
@@ -85,6 +90,7 @@ jobs:
           cp odin dist
           cp LICENSE dist
           cp -r shared dist
+          cp -r base dist
           cp -r core dist
           cp -r vendor dist
           cp -r examples dist
@@ -93,9 +99,40 @@ jobs:
         with:
           name: macos_artifacts
           path: dist
+  build_macos_arm:
+    name: MacOS ARM Build
+    if: github.repository == 'odin-lang/Odin'
+    runs-on: macos-14
+    steps:
+      - uses: actions/checkout@v1
+      - name: Download LLVM and setup PATH
+        run: |
+          brew install llvm@13
+          echo "/opt/homebrew/opt/llvm@13/bin" >> $GITHUB_PATH
+          TMP_PATH=$(xcrun --show-sdk-path)/user/include
+          echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
+      - name: build odin
+        run: make nightly
+      - name: Odin run
+        run: ./odin run examples/demo
+      - name: Copy artifacts
+        run: |
+          mkdir dist
+          cp odin dist
+          cp LICENSE dist
+          cp -r shared dist
+          cp -r base dist
+          cp -r core dist
+          cp -r vendor dist
+          cp -r examples dist
+      - name: Upload artifact
+        uses: actions/upload-artifact@v1
+        with:
+          name: macos_arm_artifacts
+          path: dist
   upload_b2:
     runs-on: [ubuntu-latest]
-    needs: [build_windows, build_macos, build_ubuntu]
+    needs: [build_windows, build_macos, build_macos_arm, build_ubuntu]
     steps:
       - uses: actions/checkout@v1
       - uses: actions/setup-python@v2
@@ -126,6 +163,11 @@ jobs:
         with:
           name: macos_artifacts
 
+      - name: Download macOS arm artifacts
+        uses: actions/download-artifact@v1
+        with:
+          name: macos_arm_artifacts
+
       - name: Create archives and upload
         shell: bash
         env:
@@ -142,6 +184,7 @@ jobs:
           ./ci/upload_create_nightly.sh "$BUCKET" windows-amd64 windows_artifacts/
           ./ci/upload_create_nightly.sh "$BUCKET" ubuntu-amd64 ubuntu_artifacts/
           ./ci/upload_create_nightly.sh "$BUCKET" macos-amd64 macos_artifacts/
+          ./ci/upload_create_nightly.sh "$BUCKET" macos-arm64 macos_arm_artifacts/
 
           echo Deleting old artifacts in B2
           python3 ci/delete_old_binaries.py "$BUCKET" "$DAYS_TO_KEEP"

+ 2 - 1
.gitignore

@@ -39,7 +39,7 @@ tests/core/test_core_net
 tests/core/test_core_os_exit
 tests/core/test_core_reflect
 tests/core/test_core_strings
-tests/core/test_crypto_hash
+tests/core/test_crypto
 tests/core/test_hash
 tests/core/test_hxa
 tests/core/test_json
@@ -49,6 +49,7 @@ tests/core/test_varint
 tests/core/test_xml
 tests/core/test_core_slice
 tests/core/test_core_thread
+tests/core/test_core_runtime
 tests/vendor/vendor_botan
 # Visual Studio 2015 cache/options directory
 .vs/

+ 0 - 0
core/builtin/builtin.odin → base/builtin/builtin.odin


+ 6 - 0
core/intrinsics/intrinsics.odin → base/intrinsics/intrinsics.odin

@@ -5,6 +5,12 @@ package intrinsics
 // Package-Related
 is_package_imported :: proc(package_name: string) -> bool ---
 
+// Matrix Related Procedures
+transpose        :: proc(m: $T/matrix[$R, $C]$E)    -> matrix[C, R]E ---
+outer_product    :: proc(a: $A/[$X]$E, b: $B/[$Y]E) -> matrix[X, Y]E ---
+hadamard_product :: proc(a, b: $T/matrix[$R, $C]$E) -> T ---
+matrix_flatten   :: proc(m: $T/matrix[$R, $C]$E)    -> [R*C]E ---
+
 // Types
 soa_struct :: proc($N: int, $T: typeid) -> type/#soa[N]T
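
For illustration, a minimal sketch of the newly documented matrix intrinsics in use; types and shapes follow the signatures declared above, and the element order produced by `matrix_flatten` follows the matrix's memory layout:

```odin
package matrix_intrinsics_example

import "base:intrinsics"
import "core:fmt"

main :: proc() {
	m: matrix[2, 3]f32 = {
		1, 2, 3,
		4, 5, 6,
	}

	t := intrinsics.transpose(m) // matrix[3, 2]f32

	a := [2]f32{1, 2}
	b := [3]f32{3, 4, 5}
	o := intrinsics.outer_product(a, b) // matrix[2, 3]f32, o[i, j] = a[i]*b[j]

	h := intrinsics.hadamard_product(m, o) // element-wise product, same shape as m

	flat := intrinsics.matrix_flatten(m) // [6]f32, in the matrix's memory order

	fmt.println(t, h, flat)
}
```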
 

+ 19 - 1
core/runtime/core.odin → base/runtime/core.odin

@@ -21,7 +21,7 @@
 //+no-instrumentation
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 // NOTE(bill): This must match the compiler's
 Calling_Convention :: enum u8 {
@@ -181,6 +181,14 @@ Type_Info_Matrix :: struct {
 Type_Info_Soa_Pointer :: struct {
 	elem: ^Type_Info,
 }
+Type_Info_Bit_Field :: struct {
+	backing_type: ^Type_Info,
+	names:        []string,
+	types:        []^Type_Info,
+	bit_sizes:    []uintptr,
+	bit_offsets:  []uintptr,
+	tags:         []string,
+}
 
 Type_Info_Flag :: enum u8 {
 	Comparable     = 0,
@@ -223,6 +231,7 @@ Type_Info :: struct {
 		Type_Info_Relative_Multi_Pointer,
 		Type_Info_Matrix,
 		Type_Info_Soa_Pointer,
+		Type_Info_Bit_Field,
 	},
 }
 
@@ -256,6 +265,7 @@ Typeid_Kind :: enum u8 {
 	Relative_Multi_Pointer,
 	Matrix,
 	Soa_Pointer,
+	Bit_Field,
 }
 #assert(len(Typeid_Kind) < 32)
 
@@ -296,6 +306,14 @@ Source_Code_Location :: struct {
 	procedure:    string,
 }
 
+/*
+	Used by the built-in directory `#load_directory(path: string) -> []Load_Directory_File`
+*/
+Load_Directory_File :: struct {
+	name: string,
+	data: []byte, // immutable data
+}
+
 Assertion_Failure_Proc :: #type proc(prefix, message: string, loc: Source_Code_Location) -> !
 
 // Allocation Stuff
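
For illustration, a hedged sketch of the `#load_directory` directive this struct backs (the `assets` directory name is hypothetical):

```odin
package load_directory_example

import "core:fmt"

main :: proc() {
	// `assets` is a hypothetical directory next to this source file;
	// each file in it is embedded into the binary at compile time.
	files := #load_directory("assets")

	for f in files {
		// Each entry is a runtime.Load_Directory_File: a name plus immutable data.
		fmt.printf("%s: %d bytes\n", f.name, len(f.data))
	}
}
```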

+ 18 - 36
core/runtime/core_builtin.odin → base/runtime/core_builtin.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 @builtin
 Maybe :: union($T: typeid) {T}
@@ -122,7 +122,7 @@ pop :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bou
 // `pop_safe` trys to remove and return the end value of dynamic array `array` and reduces the length of `array` by 1.
 // If the operation is not possible, it will return false.
 @builtin
-pop_safe :: proc(array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
+pop_safe :: proc "contextless" (array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
 	if len(array) == 0 {
 		return
 	}
@@ -148,7 +148,7 @@ pop_front :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #
 // `pop_front_safe` trys to return and remove the first value of dynamic array `array` and reduces the length of `array` by 1.
 // If the operation is not possible, it will return false.
 @builtin
-pop_front_safe :: proc(array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
+pop_front_safe :: proc "contextless" (array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
 	if len(array) == 0 {
 		return
 	}
@@ -172,7 +172,7 @@ reserve :: proc{reserve_dynamic_array, reserve_map}
 @builtin
 non_zero_reserve :: proc{non_zero_reserve_dynamic_array}
 
-// `resize` will try to resize memory of a passed dynamic array or map to the requested element count (setting the `len`, and possibly `cap`).
+// `resize` will try to resize memory of a passed dynamic array to the requested element count (setting the `len`, and possibly `cap`).
 @builtin
 resize :: proc{resize_dynamic_array}
 
@@ -312,6 +312,7 @@ make_dynamic_array_len :: proc($T: typeid/[dynamic]$E, #any_int len: int, alloca
 @(builtin, require_results)
 make_dynamic_array_len_cap :: proc($T: typeid/[dynamic]$E, #any_int len: int, #any_int cap: int, allocator := context.allocator, loc := #caller_location) -> (array: T, err: Allocator_Error) #optional_allocator_error {
 	make_dynamic_array_error_loc(loc, len, cap)
+	array.allocator = allocator // initialize allocator before just in case it fails to allocate any memory
 	data := mem_alloc_bytes(size_of(E)*cap, align_of(E), allocator, loc) or_return
 	s := Raw_Dynamic_Array{raw_data(data), len, cap, allocator}
 	if data == nil && size_of(E) != 0 {
@@ -823,42 +824,23 @@ map_insert :: proc(m: ^$T/map[$K]$V, key: K, value: V, loc := #caller_location)
 	return (^V)(__dynamic_map_set_without_hash((^Raw_Map)(m), map_info(T), rawptr(&key), rawptr(&value), loc))
 }
 
-
-@builtin
-incl_elem :: proc(s: ^$S/bit_set[$E; $U], elem: E) {
-	s^ |= {elem}
-}
-@builtin
-incl_elems :: proc(s: ^$S/bit_set[$E; $U], elems: ..E) {
-	for elem in elems {
-		s^ |= {elem}
-	}
-}
-@builtin
-incl_bit_set :: proc(s: ^$S/bit_set[$E; $U], other: S) {
-	s^ |= other
-}
-@builtin
-excl_elem :: proc(s: ^$S/bit_set[$E; $U], elem: E) {
-	s^ &~= {elem}
-}
-@builtin
-excl_elems :: proc(s: ^$S/bit_set[$E; $U], elems: ..E) {
-	for elem in elems {
-		s^ &~= {elem}
-	}
-}
-@builtin
-excl_bit_set :: proc(s: ^$S/bit_set[$E; $U], other: S) {
-	s^ &~= other
+// Explicitly inserts a key and value into a map `m`, the same as `map_insert`, but the return values differ.
+// - `prev_key_ptr` will return the previous pointer of a key if it exists, and `nil` otherwise.
+// - `value_ptr` will return the pointer of the memory where the insertion happens, and `nil` if the map failed to resize
+// - `found_previous` will be true if `prev_key_ptr != nil`
+@(require_results)
+map_insert_and_check_for_previous :: proc(m: ^$T/map[$K]$V, key: K, value: V, loc := #caller_location) -> (prev_key_ptr: ^K, value_ptr: ^V, found_previous: bool) {
+	key, value := key, value
+	kp, vp := __dynamic_map_set_extra_without_hash((^Raw_Map)(m), map_info(T), rawptr(&key), rawptr(&value), loc)
+	prev_key_ptr   = (^K)(kp)
+	value_ptr      = (^V)(vp)
+	found_previous = kp != nil
+	return
 }
 
-@builtin incl :: proc{incl_elem, incl_elems, incl_bit_set}
-@builtin excl :: proc{excl_elem, excl_elems, excl_bit_set}
-
 
 @builtin
-card :: proc(s: $S/bit_set[$E; $U]) -> int {
+card :: proc "contextless" (s: $S/bit_set[$E; $U]) -> int {
 	when size_of(S) == 1 {
 		return int(intrinsics.count_ones(transmute(u8)s))
 	} else when size_of(S) == 2 {
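
A usage sketch of the new `map_insert_and_check_for_previous`: it is not a `@builtin`, so it is reached through `base:runtime`, and `@(require_results)` means all three results must be consumed:

```odin
package map_insert_example

import "base:runtime"
import "core:fmt"

main :: proc() {
	m: map[string]int
	defer delete(m)

	// First insertion: no previous key exists.
	_, vp, found := runtime.map_insert_and_check_for_previous(&m, "answer", 42)
	fmt.println(found, vp^) // false 42

	// Same key again: the previous key pointer is reported.
	pk, vp2, found2 := runtime.map_insert_and_check_for_previous(&m, "answer", 54)
	fmt.println(found2, pk^, vp2^) // true answer 54
}
```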

+ 1 - 1
core/runtime/core_builtin_soa.odin → base/runtime/core_builtin_soa.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 _ :: intrinsics
 
 /*

+ 1 - 1
core/runtime/default_allocators_arena.odin → base/runtime/default_allocators_arena.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 DEFAULT_ARENA_GROWING_MINIMUM_BLOCK_SIZE :: uint(DEFAULT_TEMP_ALLOCATOR_BACKING_SIZE)
 

+ 12 - 0
base/runtime/default_allocators_general.odin

@@ -0,0 +1,12 @@
+package runtime
+
+when ODIN_DEFAULT_TO_NIL_ALLOCATOR {
+	default_allocator_proc :: nil_allocator_proc
+	default_allocator :: nil_allocator
+} else when ODIN_DEFAULT_TO_PANIC_ALLOCATOR {
+	default_allocator_proc :: panic_allocator_proc
+	default_allocator :: panic_allocator
+} else {
+	default_allocator :: heap_allocator
+	default_allocator_proc :: heap_allocator_proc
+}

+ 0 - 8
core/runtime/default_allocators_nil.odin → base/runtime/default_allocators_nil.odin

@@ -31,14 +31,6 @@ nil_allocator :: proc() -> Allocator {
 }
 
 
-
-when ODIN_OS == .Freestanding {
-	default_allocator_proc :: nil_allocator_proc
-	default_allocator :: nil_allocator
-}
-
-
-
 panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                              size, alignment: int,
                              old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {

+ 0 - 0
core/runtime/default_temporary_allocator.odin → base/runtime/default_temporary_allocator.odin


+ 3 - 2
core/runtime/docs.odin → base/runtime/docs.odin

@@ -44,7 +44,7 @@ memcpy
 memove
 
 
-## Procedures required by the LLVM backend
+## Procedures required by the LLVM backend if u128/i128 is used
 umodti3
 udivti3
 modti3
@@ -59,11 +59,12 @@ truncdfhf2
 gnu_h2f_ieee
 gnu_f2h_ieee
 extendhfsf2
+
+## Procedures required by the LLVM backend if f16 is used
 __ashlti3 // wasm specific
 __multi3  // wasm specific
 
 
-
 ## Required an entry point is defined (i.e. 'main')
 
 args__

+ 0 - 0
core/runtime/dynamic_array_internal.odin → base/runtime/dynamic_array_internal.odin


+ 52 - 1
core/runtime/dynamic_map_internal.odin → base/runtime/dynamic_map_internal.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 _ :: intrinsics
 
 // High performance, cache-friendly, open-addressed Robin Hood hashing hash map
@@ -841,6 +841,33 @@ __dynamic_map_get :: proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info:
 	}
 }
 
+__dynamic_map_get_key_and_value :: proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, key: rawptr) -> (key_ptr, value_ptr: rawptr) {
+	if m.len == 0 {
+		return nil, nil
+	}
+	pos := map_desired_position(m^, h)
+	distance := uintptr(0)
+	mask := (uintptr(1) << map_log2_cap(m^)) - 1
+	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
+	for {
+		element_hash := hs[pos]
+		if map_hash_is_empty(element_hash) {
+			return nil, nil
+		} else if distance > map_probe_distance(m^, element_hash, pos) {
+			return nil, nil
+		} else if element_hash == h {
+			other_key := rawptr(map_cell_index_dynamic(ks, info.ks, pos))
+			if info.key_equal(key, other_key) {
+				key_ptr   = other_key
+				value_ptr = rawptr(map_cell_index_dynamic(vs, info.vs, pos))
+				return
+			}
+		}
+		pos = (pos + 1) & mask
+		distance += 1
+	}
+}
+
 // IMPORTANT: USED WITHIN THE COMPILER
 __dynamic_map_check_grow :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> (err: Allocator_Error, has_grown: bool) {
 	if m.len >= map_resize_threshold(m^) {
@@ -874,6 +901,30 @@ __dynamic_map_set :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_In
 	m.len += 1
 	return rawptr(result)
 }
+__dynamic_map_set_extra_without_hash :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> (prev_key_ptr, value_ptr: rawptr) {
+	return __dynamic_map_set_extra(m, info, info.key_hasher(key, map_seed(m^)), key, value, loc)
+}
+
+__dynamic_map_set_extra :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, hash: Map_Hash, key, value: rawptr, loc := #caller_location) -> (prev_key_ptr, value_ptr: rawptr) {
+	if prev_key_ptr, value_ptr = __dynamic_map_get_key_and_value(m, info, hash, key); value_ptr != nil {
+		intrinsics.mem_copy_non_overlapping(value_ptr, value, info.vs.size_of_type)
+		return
+	}
+
+	hash := hash
+	err, has_grown := __dynamic_map_check_grow(m, info, loc)
+	if err != nil {
+		return nil, nil
+	}
+	if has_grown {
+		hash = info.key_hasher(key, map_seed(m^))
+	}
+
+	result := map_insert_hash_dynamic(m, info, hash, uintptr(key), uintptr(value))
+	m.len += 1
+	return nil, rawptr(result)
+}
+
 
 // IMPORTANT: USED WITHIN THE COMPILER
 @(private)

+ 1 - 1
core/runtime/entry_unix.odin → base/runtime/entry_unix.odin

@@ -3,7 +3,7 @@
 //+no-instrumentation
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 when ODIN_BUILD_MODE == .Dynamic {
 	@(link_name="_odin_entry_point", linkage="strong", require/*, link_section=".init"*/)

+ 0 - 0
core/runtime/entry_unix_no_crt_amd64.asm → base/runtime/entry_unix_no_crt_amd64.asm


+ 0 - 0
core/runtime/entry_unix_no_crt_darwin_arm64.asm → base/runtime/entry_unix_no_crt_darwin_arm64.asm


+ 0 - 0
core/runtime/entry_unix_no_crt_i386.asm → base/runtime/entry_unix_no_crt_i386.asm


+ 1 - 1
core/runtime/entry_wasm.odin → base/runtime/entry_wasm.odin

@@ -3,7 +3,7 @@
 //+no-instrumentation
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
 	@(link_name="_start", linkage="strong", require, export)

+ 1 - 1
core/runtime/entry_windows.odin → base/runtime/entry_windows.odin

@@ -3,7 +3,7 @@
 //+no-instrumentation
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 when ODIN_BUILD_MODE == .Dynamic {
 	@(link_name="DllMain", linkage="strong", require)

+ 0 - 0
core/runtime/error_checks.odin → base/runtime/error_checks.odin


+ 110 - 0
base/runtime/heap_allocator.odin

@@ -0,0 +1,110 @@
+package runtime
+
+import "base:intrinsics"
+
+heap_allocator :: proc() -> Allocator {
+	return Allocator{
+		procedure = heap_allocator_proc,
+		data = nil,
+	}
+}
+
+heap_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
+                            size, alignment: int,
+                            old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) {
+	//
+	// NOTE(tetra, 2020-01-14): The heap doesn't respect alignment.
+	// Instead, we overallocate by `alignment + size_of(rawptr) - 1`, and insert
+	// padding. We also store the original pointer returned by heap_alloc right before
+	// the pointer we return to the user.
+	//
+
+	aligned_alloc :: proc(size, alignment: int, old_ptr: rawptr = nil, zero_memory := true) -> ([]byte, Allocator_Error) {
+		a := max(alignment, align_of(rawptr))
+		space := size + a - 1
+
+		allocated_mem: rawptr
+		if old_ptr != nil {
+			original_old_ptr := ([^]rawptr)(old_ptr)[-1]
+			allocated_mem = heap_resize(original_old_ptr, space+size_of(rawptr))
+		} else {
+			allocated_mem = heap_alloc(space+size_of(rawptr), zero_memory)
+		}
+		aligned_mem := rawptr(([^]u8)(allocated_mem)[size_of(rawptr):])
+
+		ptr := uintptr(aligned_mem)
+		aligned_ptr := (ptr - 1 + uintptr(a)) & -uintptr(a)
+		diff := int(aligned_ptr - ptr)
+		if (size + diff) > space || allocated_mem == nil {
+			return nil, .Out_Of_Memory
+		}
+
+		aligned_mem = rawptr(aligned_ptr)
+		([^]rawptr)(aligned_mem)[-1] = allocated_mem
+
+		return byte_slice(aligned_mem, size), nil
+	}
+
+	aligned_free :: proc(p: rawptr) {
+		if p != nil {
+			heap_free(([^]rawptr)(p)[-1])
+		}
+	}
+
+	aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int, zero_memory := true) -> (new_memory: []byte, err: Allocator_Error) {
+		if p == nil {
+			return nil, nil
+		}
+
+		new_memory = aligned_alloc(new_size, new_alignment, p, zero_memory) or_return
+
+		// NOTE: heap_resize does not zero the new memory, so we do it
+		if zero_memory && new_size > old_size {
+			new_region := raw_data(new_memory[old_size:])
+			intrinsics.mem_zero(new_region, new_size - old_size)
+		}
+		return
+	}
+
+	switch mode {
+	case .Alloc, .Alloc_Non_Zeroed:
+		return aligned_alloc(size, alignment, nil, mode == .Alloc)
+
+	case .Free:
+		aligned_free(old_memory)
+
+	case .Free_All:
+		return nil, .Mode_Not_Implemented
+
+	case .Resize, .Resize_Non_Zeroed:
+		if old_memory == nil {
+			return aligned_alloc(size, alignment, nil, mode == .Resize)
+		}
+		return aligned_resize(old_memory, old_size, size, alignment, mode == .Resize)
+
+	case .Query_Features:
+		set := (^Allocator_Mode_Set)(old_memory)
+		if set != nil {
+			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Resize, .Resize_Non_Zeroed, .Query_Features}
+		}
+		return nil, nil
+
+	case .Query_Info:
+		return nil, .Mode_Not_Implemented
+	}
+
+	return nil, nil
+}
+
+
+heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
+	return _heap_alloc(size, zero_memory)
+}
+
+heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+	return _heap_resize(ptr, new_size)
+}
+
+heap_free :: proc(ptr: rawptr) {
+	_heap_free(ptr)
+}
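
Since `default_allocators_general.odin` above selects `heap_allocator` as the default, this allocator is normally already in `context.allocator`; a sketch of installing it explicitly:

```odin
package heap_allocator_example

import "base:runtime"
import "core:fmt"

main :: proc() {
	// heap_allocator carries no state (data = nil), so the value can be
	// built on the fly; allocations route through heap_allocator_proc.
	context.allocator = runtime.heap_allocator()

	buf := make([]byte, 64) // serviced as an .Alloc (zeroed) request
	defer delete(buf)

	fmt.println(len(buf)) // 64
}
```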

+ 15 - 0
base/runtime/heap_allocator_other.odin

@@ -0,0 +1,15 @@
+//+build js, wasi, freestanding, essence
+//+private
+package runtime
+
+_heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
+	unimplemented("base:runtime 'heap_alloc' procedure is not supported on this platform")
+}
+
+_heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+	unimplemented("base:runtime 'heap_resize' procedure is not supported on this platform")
+}
+
+_heap_free :: proc(ptr: rawptr) {
+	unimplemented("base:runtime 'heap_free' procedure is not supported on this platform")
+}

+ 38 - 0
base/runtime/heap_allocator_unix.odin

@@ -0,0 +1,38 @@
+//+build linux, darwin, freebsd, openbsd
+//+private
+package runtime
+
+when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
+} else {
+	foreign import libc "system:c"
+}
+
+@(default_calling_convention="c")
+foreign libc {
+	@(link_name="malloc")   _unix_malloc   :: proc(size: int) -> rawptr ---
+	@(link_name="calloc")   _unix_calloc   :: proc(num, size: int) -> rawptr ---
+	@(link_name="free")     _unix_free     :: proc(ptr: rawptr) ---
+	@(link_name="realloc")  _unix_realloc  :: proc(ptr: rawptr, size: int) -> rawptr ---
+}
+
+_heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
+	if size <= 0 {
+		return nil
+	}
+	if zero_memory {
+		return _unix_calloc(1, size)
+	} else {
+		return _unix_malloc(size)
+	}
+}
+
+_heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+	// NOTE: _unix_realloc doesn't guarantee new memory will be zeroed on
+	// POSIX platforms. Ensure your caller takes this into account.
+	return _unix_realloc(ptr, new_size)
+}
+
+_heap_free :: proc(ptr: rawptr) {
+	_unix_free(ptr)
+}

+ 39 - 0
base/runtime/heap_allocator_windows.odin

@@ -0,0 +1,39 @@
+package runtime
+
+foreign import kernel32 "system:Kernel32.lib"
+
+@(private="file")
+@(default_calling_convention="system")
+foreign kernel32 {
+	// NOTE(bill): The types are not using the standard names (e.g. DWORD and LPVOID) to just minimizing the dependency
+
+	// default_allocator
+	GetProcessHeap :: proc() -> rawptr ---
+	HeapAlloc      :: proc(hHeap: rawptr, dwFlags: u32, dwBytes: uint) -> rawptr ---
+	HeapReAlloc    :: proc(hHeap: rawptr, dwFlags: u32, lpMem: rawptr, dwBytes: uint) -> rawptr ---
+	HeapFree       :: proc(hHeap: rawptr, dwFlags: u32, lpMem: rawptr) -> b32 ---
+}
+
+_heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
+	HEAP_ZERO_MEMORY :: 0x00000008
+	return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY if zero_memory else 0, uint(size))
+}
+_heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+	if new_size == 0 {
+		_heap_free(ptr)
+		return nil
+	}
+	if ptr == nil {
+		return _heap_alloc(new_size)
+	}
+
+	HEAP_ZERO_MEMORY :: 0x00000008
+	return HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, ptr, uint(new_size))
+}
+_heap_free :: proc(ptr: rawptr) {
+	if ptr == nil {
+		return
+	}
+	HeapFree(GetProcessHeap(), 0, ptr)
+}
+

+ 26 - 78
core/runtime/internal.odin → base/runtime/internal.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 @(private="file")
 IS_WASM :: ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32
@@ -11,7 +11,7 @@ RUNTIME_LINKAGE :: "strong" when (
 	ODIN_BUILD_MODE == .Dynamic ||
 	!ODIN_NO_CRT) &&
 	!IS_WASM) else "internal"
-RUNTIME_REQUIRE :: !ODIN_TILDE
+RUNTIME_REQUIRE :: false // !ODIN_TILDE
 
 @(private)
 __float16 :: f16 when __ODIN_LLVM_F16_SUPPORTED else u16
@@ -22,51 +22,7 @@ byte_slice :: #force_inline proc "contextless" (data: rawptr, len: int) -> []byt
 	return ([^]byte)(data)[:max(len, 0)]
 }
 
-bswap_16 :: proc "contextless" (x: u16) -> u16 {
-	return x>>8 | x<<8
-}
-
-bswap_32 :: proc "contextless" (x: u32) -> u32 {
-	return x>>24 | (x>>8)&0xff00 | (x<<8)&0xff0000 | x<<24
-}
-
-bswap_64 :: proc "contextless" (x: u64) -> u64 {
-	z := x
-	z = (z & 0x00000000ffffffff) << 32 | (z & 0xffffffff00000000) >> 32
-	z = (z & 0x0000ffff0000ffff) << 16 | (z & 0xffff0000ffff0000) >> 16
-	z = (z & 0x00ff00ff00ff00ff) << 8  | (z & 0xff00ff00ff00ff00) >> 8
-	return z
-}
-
-bswap_128 :: proc "contextless" (x: u128) -> u128 {
-	z := transmute([4]u32)x
-	z[0], z[3] = bswap_32(z[3]), bswap_32(z[0])
-	z[1], z[2] = bswap_32(z[2]), bswap_32(z[1])
-	return transmute(u128)z
-}
-
-bswap_f16 :: proc "contextless" (f: f16) -> f16 {
-	x := transmute(u16)f
-	z := bswap_16(x)
-	return transmute(f16)z
-
-}
-
-bswap_f32 :: proc "contextless" (f: f32) -> f32 {
-	x := transmute(u32)f
-	z := bswap_32(x)
-	return transmute(f32)z
-
-}
-
-bswap_f64 :: proc "contextless" (f: f64) -> f64 {
-	x := transmute(u64)f
-	z := bswap_64(x)
-	return transmute(f64)z
-}
-
-
-is_power_of_two_int :: #force_inline proc(x: int) -> bool {
+is_power_of_two_int :: #force_inline proc "contextless" (x: int) -> bool {
 	if x <= 0 {
 		return false
 	}
@@ -84,7 +40,7 @@ align_forward_int :: #force_inline proc(ptr, align: int) -> int {
 	return p
 }
 
-is_power_of_two_uintptr :: #force_inline proc(x: uintptr) -> bool {
+is_power_of_two_uintptr :: #force_inline proc "contextless" (x: uintptr) -> bool {
 	if x <= 0 {
 		return false
 	}
@@ -608,36 +564,6 @@ string_decode_last_rune :: proc "contextless" (s: string) -> (rune, int) {
 	return r, size
 }
 
-
-abs_f16 :: #force_inline proc "contextless" (x: f16) -> f16 {
-	return -x if x < 0 else x
-}
-abs_f32 :: #force_inline proc "contextless" (x: f32) -> f32 {
-	return -x if x < 0 else x
-}
-abs_f64 :: #force_inline proc "contextless" (x: f64) -> f64 {
-	return -x if x < 0 else x
-}
-
-min_f16 :: #force_inline proc "contextless" (a, b: f16) -> f16 {
-	return a if a < b else b
-}
-min_f32 :: #force_inline proc "contextless" (a, b: f32) -> f32 {
-	return a if a < b else b
-}
-min_f64 :: #force_inline proc "contextless" (a, b: f64) -> f64 {
-	return a if a < b else b
-}
-max_f16 :: #force_inline proc "contextless" (a, b: f16) -> f16 {
-	return a if a > b else b
-}
-max_f32 :: #force_inline proc "contextless" (a, b: f32) -> f32 {
-	return a if a > b else b
-}
-max_f64 :: #force_inline proc "contextless" (a, b: f64) -> f64 {
-	return a if a > b else b
-}
-
 abs_complex32 :: #force_inline proc "contextless" (x: complex32) -> f16 {
 	p, q := abs(real(x)), abs(imag(x))
 	if p < q {
@@ -1108,3 +1034,25 @@ fixdfti :: proc(a: u64) -> i128 {
 	}
 
 }
+
+
+
+__write_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uintptr) {
+	for i in 0..<size {
+		j := offset+i
+		the_bit := byte((src[i/8]) & (1<<(i&7)) != 0)
+		b := the_bit<<(j&7)
+		dst[j/8] &~= b
+		dst[j/8] |=  b
+	}
+}
+
+__read_bits :: proc "contextless" (dst, src: [^]byte, offset: uintptr, size: uintptr) {
+	for j in 0..<size {
+		i := offset+j
+		the_bit := byte((src[i/8]) & (1<<(i&7)) != 0)
+		b := the_bit<<(j&7)
+		dst[j/8] &~= b
+		dst[j/8] |=  b
+	}
+}

+ 7 - 0
base/runtime/os_specific.odin

@@ -0,0 +1,7 @@
+package runtime
+
+_OS_Errno :: distinct int
+
+stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+	return _stderr_write(data)
+}
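
A sketch of calling the wrapper directly; being "contextless", it is usable even where no context is available, which is what the `print_*` procedures build on:

```odin
package stderr_write_example

import "base:runtime"

main :: proc() {
	msg := "written via runtime.stderr_write\n"
	n, errno := runtime.stderr_write(transmute([]byte)msg)
	_ = n
	_ = errno
}
```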

+ 22 - 0
base/runtime/os_specific_bsd.odin

@@ -0,0 +1,22 @@
+//+build freebsd, openbsd
+//+private
+package runtime
+
+foreign import libc "system:c"
+
+@(default_calling_convention="c")
+foreign libc {
+	@(link_name="write")
+	_unix_write :: proc(fd: i32, buf: rawptr, size: int) -> int ---
+
+	__error :: proc() -> ^i32 ---
+}
+
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+	ret := _unix_write(2, raw_data(data), len(data))
+	if ret < len(data) {
+		err := __error()
+		return int(ret), _OS_Errno(err^ if err != nil else 0)
+	}
+	return int(ret), 0
+}

+ 15 - 0
base/runtime/os_specific_darwin.odin

@@ -0,0 +1,15 @@
+//+build darwin
+//+private
+package runtime
+
+import "base:intrinsics"
+
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+	WRITE  :: 0x20000004
+	STDERR :: 2
+	ret := intrinsics.syscall(WRITE, STDERR, uintptr(raw_data(data)), uintptr(len(data)))
+	if ret < 0 {
+		return 0, _OS_Errno(-ret)
+	}
+	return int(ret), 0
+}

+ 2 - 1
core/runtime/os_specific_freestanding.odin → base/runtime/os_specific_freestanding.odin

@@ -1,7 +1,8 @@
 //+build freestanding
+//+private
 package runtime
 
 // TODO(bill): reimplement `os.write`
-_os_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
 	return 0, -1
 }

+ 2 - 1
core/runtime/os_specific_js.odin → base/runtime/os_specific_js.odin

@@ -1,9 +1,10 @@
 //+build js
+//+private
 package runtime
 
 foreign import "odin_env"
 
-_os_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
 	foreign odin_env {
 		write :: proc "contextless" (fd: u32, p: []byte) ---
 	}

+ 24 - 0
base/runtime/os_specific_linux.odin

@@ -0,0 +1,24 @@
+//+private
+package runtime
+
+import "base:intrinsics"
+
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+	when ODIN_ARCH == .amd64 {
+		SYS_write :: uintptr(1)
+	} else when ODIN_ARCH == .arm64 {
+		SYS_write :: uintptr(64)
+	} else when ODIN_ARCH == .i386 {
+		SYS_write :: uintptr(4)
+	} else when ODIN_ARCH == .arm32 {
+		SYS_write :: uintptr(4)
+	}
+
+	stderr :: 2
+
+	ret := int(intrinsics.syscall(SYS_write, uintptr(stderr), uintptr(raw_data(data)), uintptr(len(data))))
+	if ret < 0 && ret > -4096 {
+		return 0, _OS_Errno(-ret)
+	}
+	return ret, 0
+}
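
For illustration, a hedged sketch of issuing the same syscall by hand on Linux; the syscall number is per-architecture, exactly as the `when` chain above encodes:

```odin
//+build linux
package raw_syscall_example

import "base:intrinsics"

main :: proc() {
	when ODIN_ARCH == .amd64 {
		SYS_write :: uintptr(1) // amd64 only; arm64 uses 64, i386/arm32 use 4
		msg := "direct syscall\n"
		ret := intrinsics.syscall(SYS_write, uintptr(2), uintptr(raw_data(msg)), uintptr(len(msg)))
		assert(int(ret) == len(msg))
	}
}
```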

+ 2 - 1
core/runtime/os_specific_wasi.odin → base/runtime/os_specific_wasi.odin

@@ -1,9 +1,10 @@
 //+build wasi
+//+private
 package runtime
 
 import "core:sys/wasm/wasi"
 
-_os_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
+_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
 	data := (wasi.ciovec_t)(data)
 	n, err := wasi.fd_write(1, {data})
 	return int(n), _OS_Errno(err)

+ 51 - 0
base/runtime/os_specific_windows.odin

@@ -0,0 +1,51 @@
+//+build windows
+//+private
+package runtime
+
+foreign import kernel32 "system:Kernel32.lib"
+
+@(private="file")
+@(default_calling_convention="system")
+foreign kernel32 {
+	// NOTE(bill): The types are not using the standard names (e.g. DWORD and LPVOID) to just minimizing the dependency
+
+	// stderr_write
+	GetStdHandle         :: proc(which: u32) -> rawptr ---
+	SetHandleInformation :: proc(hObject: rawptr, dwMask: u32, dwFlags: u32) -> b32 ---
+	WriteFile            :: proc(hFile: rawptr, lpBuffer: rawptr, nNumberOfBytesToWrite: u32, lpNumberOfBytesWritten: ^u32, lpOverlapped: rawptr) -> b32 ---
+	GetLastError         :: proc() -> u32 ---
+}
+
+_stderr_write :: proc "contextless" (data: []byte) -> (n: int, err: _OS_Errno) #no_bounds_check {
+	if len(data) == 0 {
+		return 0, 0
+	}
+
+	STD_ERROR_HANDLE :: ~u32(0) -12 + 1
+	HANDLE_FLAG_INHERIT :: 0x00000001
+	MAX_RW :: 1<<30
+
+	h := GetStdHandle(STD_ERROR_HANDLE)
+	when size_of(uintptr) == 8 {
+		SetHandleInformation(h, HANDLE_FLAG_INHERIT, 0)
+	}
+
+	single_write_length: u32
+	total_write: i64
+	length := i64(len(data))
+
+	for total_write < length {
+		remaining := length - total_write
+		to_write := u32(min(i32(remaining), MAX_RW))
+
+		e := WriteFile(h, &data[total_write], to_write, &single_write_length, nil)
+		if single_write_length <= 0 || !e {
+			err = _OS_Errno(GetLastError())
+			n = int(total_write)
+			return
+		}
+		total_write += i64(single_write_length)
+	}
+	n = int(total_write)
+	return
+}

+ 20 - 6
core/runtime/print.odin → base/runtime/print.odin

@@ -123,13 +123,13 @@ encode_rune :: proc "contextless" (c: rune) -> ([4]u8, int) {
 }
 
 print_string :: proc "contextless" (str: string) -> (n: int) {
-	n, _ = os_write(transmute([]byte)str)
+	n, _ = stderr_write(transmute([]byte)str)
 	return
 }
 
 print_strings :: proc "contextless" (args: ..string) -> (n: int) {
 	for str in args {
-		m, err := os_write(transmute([]byte)str)
+		m, err := stderr_write(transmute([]byte)str)
 		n += m
 		if err != 0 {
 			break
@@ -139,7 +139,7 @@ print_strings :: proc "contextless" (args: ..string) -> (n: int) {
 }
 
 print_byte :: proc "contextless" (b: byte) -> (n: int) {
-	n, _ = os_write([]byte{b})
+	n, _ = stderr_write([]byte{b})
 	return
 }
 
@@ -178,7 +178,7 @@ print_rune :: proc "contextless" (r: rune) -> int #no_bounds_check {
 	}
 
 	b, n := encode_rune(r)
-	m, _ := os_write(b[:n])
+	m, _ := stderr_write(b[:n])
 	return m
 }
 
@@ -194,7 +194,7 @@ print_u64 :: proc "contextless" (x: u64) #no_bounds_check {
 	}
 	i -= 1; a[i] = _INTEGER_DIGITS_VAR[u % b]
 
-	os_write(a[i:])
+	stderr_write(a[i:])
 }
 
 
@@ -216,7 +216,7 @@ print_i64 :: proc "contextless" (x: i64) #no_bounds_check {
 		i -= 1; a[i] = '-'
 	}
 
-	os_write(a[i:])
+	stderr_write(a[i:])
 }
 
 print_uint    :: proc "contextless" (x: uint)    { print_u64(u64(x)) }
@@ -459,6 +459,20 @@ print_type :: proc "contextless" (ti: ^Type_Info) {
 		}
 		print_byte(']')
 
+	case Type_Info_Bit_Field:
+		print_string("bit_field ")
+		print_type(info.backing_type)
+		print_string(" {")
+		for name, i in info.names {
+			if i > 0 { print_string(", ") }
+			print_string(name)
+			print_string(": ")
+			print_type(info.types[i])
+			print_string(" | ")
+			print_u64(u64(info.bit_sizes[i]))
+		}
+		print_byte('}')
+
 
 	case Type_Info_Simd_Vector:
 		print_string("#simd[")
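
A hedged sketch of the declaration form the new `Type_Info_Bit_Field` case handles, assuming the `bit_field` syntax introduced alongside this runtime support:

```odin
package bit_field_example

import "base:runtime"

// Each member occupies the stated number of bits of the backing type
// (4 + 4 + 7 + 1 = 16 bits of the u16).
Register :: bit_field u16 {
	opcode:  u8   | 4,
	channel: u8   | 4,
	value:   u8   | 7,
	enable:  bool | 1,
}

main :: proc() {
	// Prints along the lines of: bit_field u16 {opcode: u8 | 4, ...}
	runtime.print_type(type_info_of(Register))
	runtime.print_byte('\n')
}
```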

+ 0 - 0
core/runtime/procs.odin → base/runtime/procs.odin


+ 1 - 1
core/runtime/procs_darwin.odin → base/runtime/procs_darwin.odin

@@ -3,7 +3,7 @@ package runtime
 
 foreign import "system:Foundation.framework"
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 objc_id :: ^intrinsics.objc_object
 objc_Class :: ^intrinsics.objc_class

+ 0 - 0
core/runtime/procs_js.odin → base/runtime/procs_js.odin


+ 0 - 0
core/runtime/procs_wasm.odin → base/runtime/procs_wasm.odin


+ 0 - 0
core/runtime/procs_windows_amd64.asm → base/runtime/procs_windows_amd64.asm


+ 0 - 0
core/runtime/procs_windows_amd64.odin → base/runtime/procs_windows_amd64.odin


+ 0 - 0
core/runtime/procs_windows_i386.odin → base/runtime/procs_windows_i386.odin


+ 1 - 1
core/runtime/udivmod128.odin → base/runtime/udivmod128.odin

@@ -1,6 +1,6 @@
 package runtime
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 udivmod128 :: proc "c" (a, b: u128, rem: ^u128) -> u128 {
 	_ctz :: intrinsics.count_trailing_zeros

+ 1 - 1
core/bufio/scanner.odin

@@ -4,7 +4,7 @@ import "core:bytes"
 import "core:io"
 import "core:mem"
 import "core:unicode/utf8"
-import "core:intrinsics"
+import "base:intrinsics"
 
 // Extra errors returns by scanning procedures
 Scanner_Extra_Error :: enum i32 {

+ 1 - 1
core/c/c.odin

@@ -1,6 +1,6 @@
 package c
 
-import builtin "core:builtin"
+import builtin "base:builtin"
 
 char           :: builtin.u8  // assuming -funsigned-char
 

+ 1 - 1
core/c/libc/complex.odin

@@ -67,7 +67,7 @@ foreign libc {
 	crealf  :: proc(z: complex_float) -> float ---
 }
 
-import builtin "core:builtin"
+import builtin "base:builtin"
 
 complex_float  :: distinct builtin.complex64
 complex_double :: distinct builtin.complex128

+ 1 - 1
core/c/libc/math.odin

@@ -2,7 +2,7 @@ package libc
 
 // 7.12 Mathematics
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"

+ 1 - 1
core/c/libc/stdarg.odin

@@ -2,7 +2,7 @@ package libc
 
 // 7.16 Variable arguments
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 @(private="file")
 @(default_calling_convention="none")

+ 1 - 1
core/c/libc/stdatomic.odin

@@ -2,7 +2,7 @@ package libc
 
 // 7.17 Atomics
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 ATOMIC_BOOL_LOCK_FREE     :: true
 ATOMIC_CHAR_LOCK_FREE     :: true

+ 1 - 1
core/c/libc/string.odin

@@ -1,6 +1,6 @@
 package libc
 
-import "core:runtime"
+import "base:runtime"
 
 // 7.24 String handling
 

+ 1 - 1
core/compress/common.odin

@@ -12,7 +12,7 @@ package compress
 
 import "core:io"
 import "core:bytes"
-import "core:runtime"
+import "base:runtime"
 
 /*
 	These settings bound how much compression algorithms will allocate for their output buffer.

+ 1 - 1
core/compress/shoco/shoco.odin

@@ -11,7 +11,7 @@
 // package shoco is an implementation of the shoco short string compressor
 package shoco
 
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:compress"
 
 Shoco_Pack :: struct {

+ 1 - 1
core/container/bit_array/bit_array.odin

@@ -1,6 +1,6 @@
 package dynamic_bit_array
 
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:mem"
 
 /*

+ 1 - 1
core/container/intrusive/list/intrusive_list.odin

@@ -1,6 +1,6 @@
 package container_intrusive_list
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 // An intrusive doubly-linked list
 //

+ 2 - 2
core/container/lru/lru_cache.odin

@@ -1,7 +1,7 @@
 package container_lru
 
-import "core:runtime"
-import "core:intrinsics"
+import "base:runtime"
+import "base:intrinsics"
 _ :: runtime
 _ :: intrinsics
 

+ 1 - 1
core/container/priority_queue/priority_queue.odin

@@ -1,6 +1,6 @@
 package container_priority_queue
 
-import "core:builtin"
+import "base:builtin"
 
 Priority_Queue :: struct($T: typeid) {
 	queue: [dynamic]T,

+ 2 - 2
core/container/queue/queue.odin

@@ -1,7 +1,7 @@
 package container_queue
 
-import "core:builtin"
-import "core:runtime"
+import "base:builtin"
+import "base:runtime"
 _ :: runtime
 
 // Dynamically resizable double-ended queue/ring-buffer

+ 2 - 2
core/container/small_array/small_array.odin

@@ -1,7 +1,7 @@
 package container_small_array
 
-import "core:builtin"
-import "core:runtime"
+import "base:builtin"
+import "base:runtime"
 _ :: runtime
 
 Small_Array :: struct($N: int, $T: typeid) where N >= 0 {

+ 2 - 2
core/container/topological_sort/topological_sort.odin

@@ -3,8 +3,8 @@
 // map type is being used to accelerate lookups.
 package container_topological_sort
 
-import "core:intrinsics"
-import "core:runtime"
+import "base:intrinsics"
+import "base:runtime"
 _ :: intrinsics
 _ :: runtime
 

+ 8 - 70
core/crypto/README.md

@@ -1,84 +1,22 @@
 # crypto
 
-A cryptography library for the Odin language
+A cryptography library for the Odin language.
 
 ## Supported
 
-This library offers various algorithms implemented in Odin.
-Please see the chart below for some of the options.
-
-## Hashing algorithms
-
-| Algorithm                                                                                                    |                  |
-|:-------------------------------------------------------------------------------------------------------------|:-----------------|
-| [BLAKE2B](https://datatracker.ietf.org/doc/html/rfc7693)                                                     | &#10004;&#65039; |
-| [BLAKE2S](https://datatracker.ietf.org/doc/html/rfc7693)                                                     | &#10004;&#65039; |
-| [SHA-2](https://csrc.nist.gov/csrc/media/publications/fips/180/2/archive/2002-08-01/documents/fips180-2.pdf) | &#10004;&#65039; |
-| [SHA-3](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf)                                            | &#10004;&#65039; |
-| [SHAKE](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf)                                            | &#10004;&#65039; |
-| [SM3](https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02)                                           | &#10004;&#65039; |
-| legacy/[Keccak](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf)                                    | &#10004;&#65039; |
-| legacy/[MD5](https://datatracker.ietf.org/doc/html/rfc1321)                                                  | &#10004;&#65039; |
-| legacy/[SHA-1](https://datatracker.ietf.org/doc/html/rfc3174)                                                | &#10004;&#65039; |
-
-#### High level API
-
-Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_<size>`\*.
-Included in these groups are six procedures.
-- `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
-- `hash_bytes` - Hash a given byte slice and return the computed hash
-- `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
-- `hash_bytes_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
-- `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
-- `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
-
-\* On some algorithms there is another part to the name, since they might offer control about additional parameters.
-For instance, `SHA-2` offers different sizes.
-Computing a 512-bit hash is therefore achieved by calling `sha2.hash_512(...)`.
-
-#### Low level API
-
-The above mentioned procedures internally call three procedures: `init`, `update` and `final`.
-You may also directly call them, if you wish.
-
-#### Example
-
-```odin
-package crypto_example
-
-// Import the desired package
-import "core:crypto/blake2b"
-
-main :: proc() {
-    input := "foo"
-
-    // Compute the hash, using the high level API
-    computed_hash := blake2b.hash(input)
-
-    // Variant that takes a destination buffer, instead of returning the computed hash
-    hash := make([]byte, sha2.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
-    blake2b.hash(input, hash[:])
-
-    // Compute the hash, using the low level API
-    ctx: blake2b.Context
-    computed_hash_low: [blake2b.DIGEST_SIZE]byte
-    blake2b.init(&ctx)
-    blake2b.update(&ctx, transmute([]byte)input)
-    blake2b.final(&ctx, computed_hash_low[:])
-}
-```
-For example uses of all available algorithms, please see the tests within `tests/core/crypto`.
+This package offers various algorithms implemented in Odin, along with
+useful helpers such as access to the system entropy source, and a
+constant-time byte comparison.
 
 ## Implementation considerations
 
 - The crypto packages are not thread-safe.
 - Best-effort is make to mitigate timing side-channels on reasonable
-  architectures. Architectures that are known to be unreasonable include
+  architectures.  Architectures that are known to be unreasonable include
   but are not limited to i386, i486, and WebAssembly.
-- Some but not all of the packages attempt to santize sensitive data,
-  however this is not done consistently through the library at the moment.
-  As Thomas Pornin puts it "In general, such memory cleansing is a fool's
-  quest."
+- The packages attempt to santize sensitive data, however this is, and
+  will remain a "best-effort" implementation decision.  As Thomas Pornin
+  puts it "In general, such memory cleansing is a fool's quest."
 - All of these packages have not received independent third party review.
 
 ## License
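
A sketch of the two helpers the rewritten README refers to; the names `crypto.rand_bytes` and `crypto.compare_constant_time` are assumptions here, not taken from this diff:

```odin
package crypto_helpers_example

import "core:crypto"
import "core:fmt"

main :: proc() {
	// Fill a key from the system entropy source (assumed: crypto.rand_bytes).
	key: [32]byte
	crypto.rand_bytes(key[:])

	// Constant-time equality: the running time does not depend on where
	// the inputs differ (assumed: returns 1 when equal).
	a := [4]byte{1, 2, 3, 4}
	b := [4]byte{1, 2, 3, 5}
	fmt.println(crypto.compare_constant_time(a[:], b[:]) == 1) // false
}
```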

+ 58 - 39
core/crypto/_blake2/blake2.odin

@@ -11,6 +11,7 @@ package _blake2
 */
 
 import "core:encoding/endian"
+import "core:mem"
 
 BLAKE2S_BLOCK_SIZE :: 64
 BLAKE2S_SIZE :: 32
@@ -28,7 +29,6 @@ Blake2s_Context :: struct {
 	is_keyed:     bool,
 	size:         byte,
 	is_last_node: bool,
-	cfg:          Blake2_Config,
 
 	is_initialized: bool,
 }
@@ -44,7 +44,6 @@ Blake2b_Context :: struct {
 	is_keyed:     bool,
 	size:         byte,
 	is_last_node: bool,
-	cfg:          Blake2_Config,
 
 	is_initialized: bool,
 }
@@ -83,62 +82,61 @@ BLAKE2B_IV := [8]u64 {
 	0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
 }
 
-init :: proc(ctx: ^$T) {
+init :: proc(ctx: ^$T, cfg: ^Blake2_Config) {
 	when T == Blake2s_Context {
-		block_size :: BLAKE2S_BLOCK_SIZE
 		max_size :: BLAKE2S_SIZE
 	} else when T == Blake2b_Context {
-		block_size :: BLAKE2B_BLOCK_SIZE
 		max_size :: BLAKE2B_SIZE
 	}
 
-	if ctx.cfg.size > max_size {
+	if cfg.size > max_size {
 		panic("blake2: requested output size exceeeds algorithm max")
 	}
 
-	p := make([]byte, block_size)
-	defer delete(p)
+	// To save having to allocate a scratch buffer, use the internal
+	// data buffer (`ctx.x`), as it is exactly the correct size.
+	p := ctx.x[:]
 
-	p[0] = ctx.cfg.size
-	p[1] = byte(len(ctx.cfg.key))
+	p[0] = cfg.size
+	p[1] = byte(len(cfg.key))
 
-	if ctx.cfg.salt != nil {
+	if cfg.salt != nil {
 		when T == Blake2s_Context {
-			copy(p[16:], ctx.cfg.salt)
+			copy(p[16:], cfg.salt)
 		} else when T == Blake2b_Context {
-			copy(p[32:], ctx.cfg.salt)
+			copy(p[32:], cfg.salt)
 		}
 	}
-	if ctx.cfg.person != nil {
+	if cfg.person != nil {
 		when T == Blake2s_Context {
-			copy(p[24:], ctx.cfg.person)
+			copy(p[24:], cfg.person)
 		} else when T == Blake2b_Context {
-			copy(p[48:], ctx.cfg.person)
+			copy(p[48:], cfg.person)
 		}
 	}
 
-	if ctx.cfg.tree != nil {
-		p[2] = ctx.cfg.tree.(Blake2_Tree).fanout
-		p[3] = ctx.cfg.tree.(Blake2_Tree).max_depth
-		endian.unchecked_put_u32le(p[4:], ctx.cfg.tree.(Blake2_Tree).leaf_size)
+	if cfg.tree != nil {
+		p[2] = cfg.tree.(Blake2_Tree).fanout
+		p[3] = cfg.tree.(Blake2_Tree).max_depth
+		endian.unchecked_put_u32le(p[4:], cfg.tree.(Blake2_Tree).leaf_size)
 		when T == Blake2s_Context {
-			p[8] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset)
-			p[9] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 8)
-			p[10] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 16)
-			p[11] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 24)
-			p[12] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 32)
-			p[13] = byte(ctx.cfg.tree.(Blake2_Tree).node_offset >> 40)
-			p[14] = ctx.cfg.tree.(Blake2_Tree).node_depth
-			p[15] = ctx.cfg.tree.(Blake2_Tree).inner_hash_size
+			p[8] = byte(cfg.tree.(Blake2_Tree).node_offset)
+			p[9] = byte(cfg.tree.(Blake2_Tree).node_offset >> 8)
+			p[10] = byte(cfg.tree.(Blake2_Tree).node_offset >> 16)
+			p[11] = byte(cfg.tree.(Blake2_Tree).node_offset >> 24)
+			p[12] = byte(cfg.tree.(Blake2_Tree).node_offset >> 32)
+			p[13] = byte(cfg.tree.(Blake2_Tree).node_offset >> 40)
+			p[14] = cfg.tree.(Blake2_Tree).node_depth
+			p[15] = cfg.tree.(Blake2_Tree).inner_hash_size
 		} else when T == Blake2b_Context {
-			endian.unchecked_put_u64le(p[8:], ctx.cfg.tree.(Blake2_Tree).node_offset)
-			p[16] = ctx.cfg.tree.(Blake2_Tree).node_depth
-			p[17] = ctx.cfg.tree.(Blake2_Tree).inner_hash_size
+			endian.unchecked_put_u64le(p[8:], cfg.tree.(Blake2_Tree).node_offset)
+			p[16] = cfg.tree.(Blake2_Tree).node_depth
+			p[17] = cfg.tree.(Blake2_Tree).inner_hash_size
 		}
 	} else {
 		p[2], p[3] = 1, 1
 	}
-	ctx.size = ctx.cfg.size
+	ctx.size = cfg.size
 	for i := 0; i < 8; i += 1 {
 		when T == Blake2s_Context {
 			ctx.h[i] = BLAKE2S_IV[i] ~ endian.unchecked_get_u32le(p[i * 4:])
@@ -147,11 +145,14 @@ init :: proc(ctx: ^$T) {
 			ctx.h[i] = BLAKE2B_IV[i] ~ endian.unchecked_get_u64le(p[i * 8:])
 		}
 	}
-	if ctx.cfg.tree != nil && ctx.cfg.tree.(Blake2_Tree).is_last_node {
+
+	mem.zero(&ctx.x, size_of(ctx.x)) // Done with the scratch space, no barrier.
+
+	if cfg.tree != nil && cfg.tree.(Blake2_Tree).is_last_node {
 		ctx.is_last_node = true
 	}
-	if len(ctx.cfg.key) > 0 {
-		copy(ctx.padded_key[:], ctx.cfg.key)
+	if len(cfg.key) > 0 {
+		copy(ctx.padded_key[:], cfg.key)
 		update(ctx, ctx.padded_key[:])
 		ctx.is_keyed = true
 	}
@@ -194,22 +195,40 @@ update :: proc(ctx: ^$T, p: []byte) {
 	ctx.nx += copy(ctx.x[ctx.nx:], p)
 }
 
-final :: proc(ctx: ^$T, hash: []byte) {
+final :: proc(ctx: ^$T, hash: []byte, finalize_clone: bool = false) {
 	assert(ctx.is_initialized)
 
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: T
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
 	when T == Blake2s_Context {
-		if len(hash) < int(ctx.cfg.size) {
+		if len(hash) < int(ctx.size) {
 			panic("crypto/blake2s: invalid destination digest size")
 		}
 		blake2s_final(ctx, hash)
 	} else when T == Blake2b_Context {
-		if len(hash) < int(ctx.cfg.size) {
+		if len(hash) < int(ctx.size) {
 			panic("crypto/blake2b: invalid destination digest size")
 		}
 		blake2b_final(ctx, hash)
 	}
+}
+
+clone :: proc(ctx, other: ^$T) {
+	ctx^ = other^
+}
+
+reset :: proc(ctx: ^$T) {
+	if !ctx.is_initialized {
+		return
+	}
 
-	ctx.is_initialized = false
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
 @(private)
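
`_blake2` is an internal package normally reached through `core:crypto/blake2b` and `blake2s`; still, a sketch of the reworked `init`/`final` signatures above:

```odin
package blake2_low_level_example

import "core:crypto/_blake2"
import "core:fmt"

main :: proc() {
	ctx: _blake2.Blake2b_Context
	cfg := _blake2.Blake2_Config{size = _blake2.BLAKE2B_SIZE}
	_blake2.init(&ctx, &cfg) // cfg is now passed in, not stored in the context

	msg := "hello"
	_blake2.update(&ctx, transmute([]byte)msg)

	digest: [_blake2.BLAKE2B_SIZE]byte
	// finalize_clone = true finalizes a copy, so ctx can keep absorbing;
	// with the default (false), final wipes the context via reset.
	_blake2.final(&ctx, digest[:], true)
	fmt.printf("%x\n", digest)
}
```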

+ 68 - 43
core/crypto/_sha3/sha3.odin

@@ -12,10 +12,16 @@ package _sha3
 */
 
 import "core:math/bits"
+import "core:mem"
 
 ROUNDS :: 24
 
-Sha3_Context :: struct {
+RATE_224 :: 1152 / 8
+RATE_256 :: 1088 / 8
+RATE_384 :: 832 / 8
+RATE_512 :: 576 / 8
+
+Context :: struct {
 	st:        struct #raw_union {
 		b: [200]u8,
 		q: [25]u64,
@@ -103,81 +109,100 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
 	}
 }
 
-init :: proc(c: ^Sha3_Context) {
+init :: proc(ctx: ^Context) {
 	for i := 0; i < 25; i += 1 {
-		c.st.q[i] = 0
+		ctx.st.q[i] = 0
 	}
-	c.rsiz = 200 - 2 * c.mdlen
-	c.pt = 0
+	ctx.rsiz = 200 - 2 * ctx.mdlen
+	ctx.pt = 0
 
-	c.is_initialized = true
-	c.is_finalized = false
+	ctx.is_initialized = true
+	ctx.is_finalized = false
 }
 
-update :: proc(c: ^Sha3_Context, data: []byte) {
-	assert(c.is_initialized)
-	assert(!c.is_finalized)
+update :: proc(ctx: ^Context, data: []byte) {
+	assert(ctx.is_initialized)
+	assert(!ctx.is_finalized)
 
-	j := c.pt
+	j := ctx.pt
 	for i := 0; i < len(data); i += 1 {
-		c.st.b[j] ~= data[i]
+		ctx.st.b[j] ~= data[i]
 		j += 1
-		if j >= c.rsiz {
-			keccakf(&c.st.q)
+		if j >= ctx.rsiz {
+			keccakf(&ctx.st.q)
 			j = 0
 		}
 	}
-	c.pt = j
+	ctx.pt = j
 }
 
-final :: proc(c: ^Sha3_Context, hash: []byte) {
-	assert(c.is_initialized)
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	assert(ctx.is_initialized)
 
-	if len(hash) < c.mdlen {
-		if c.is_keccak {
+	if len(hash) < ctx.mdlen {
+		if ctx.is_keccak {
 			panic("crypto/keccac: invalid destination digest size")
 		}
 		panic("crypto/sha3: invalid destination digest size")
 	}
-	if c.is_keccak {
-		c.st.b[c.pt] ~= 0x01
+
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: Context
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
+	if ctx.is_keccak {
+		ctx.st.b[ctx.pt] ~= 0x01
 	} else {
-		c.st.b[c.pt] ~= 0x06
+		ctx.st.b[ctx.pt] ~= 0x06
 	}
 
-	c.st.b[c.rsiz - 1] ~= 0x80
-	keccakf(&c.st.q)
-	for i := 0; i < c.mdlen; i += 1 {
-		hash[i] = c.st.b[i]
+	ctx.st.b[ctx.rsiz - 1] ~= 0x80
+	keccakf(&ctx.st.q)
+	for i := 0; i < ctx.mdlen; i += 1 {
+		hash[i] = ctx.st.b[i]
+	}
+}
+
+clone :: proc(ctx, other: ^Context) {
+	ctx^ = other^
+}
+
+reset :: proc(ctx: ^Context) {
+	if !ctx.is_initialized {
+		return
 	}
 
-	c.is_initialized = false // No more absorb, no more squeeze.
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
-shake_xof :: proc(c: ^Sha3_Context) {
-	assert(c.is_initialized)
-	assert(!c.is_finalized)
+shake_xof :: proc(ctx: ^Context) {
+	assert(ctx.is_initialized)
+	assert(!ctx.is_finalized)
 
-	c.st.b[c.pt] ~= 0x1F
-	c.st.b[c.rsiz - 1] ~= 0x80
-	keccakf(&c.st.q)
-	c.pt = 0
+	ctx.st.b[ctx.pt] ~= 0x1F
+	ctx.st.b[ctx.rsiz - 1] ~= 0x80
+	keccakf(&ctx.st.q)
+	ctx.pt = 0
 
-	c.is_finalized = true // No more absorb, unlimited squeeze.
+	ctx.is_finalized = true // No more absorb, unlimited squeeze.
 }
 
-shake_out :: proc(c: ^Sha3_Context, hash: []byte) {
-	assert(c.is_initialized)
-	assert(c.is_finalized)
+shake_out :: proc(ctx: ^Context, hash: []byte) {
+	assert(ctx.is_initialized)
+	assert(ctx.is_finalized)
 
-	j := c.pt
+	j := ctx.pt
 	for i := 0; i < len(hash); i += 1 {
-		if j >= c.rsiz {
-			keccakf(&c.st.q)
+		if j >= ctx.rsiz {
+			keccakf(&ctx.st.q)
 			j = 0
 		}
-		hash[i] = c.st.b[j]
+		hash[i] = ctx.st.b[j]
 		j += 1
 	}
-	c.pt = j
+	ctx.pt = j
 }

+ 32 - 100
core/crypto/blake2b/blake2b.odin

@@ -1,3 +1,10 @@
+/*
+package blake2b implements the BLAKE2b hash algorithm.
+
+See:
+- https://datatracker.ietf.org/doc/html/rfc7693
+- https://www.blake2.net
+*/
 package blake2b
 
 /*
@@ -6,122 +13,47 @@ package blake2b
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Interface for the BLAKE2b hashing algorithm.
-    BLAKE2b and BLAKE2s share the implementation in the _blake2 package.
 */
 
-import "core:io"
-import "core:os"
-
 import "../_blake2"
 
-/*
-    High level API
-*/
-
+// DIGEST_SIZE is the BLAKE2b digest size in bytes.
 DIGEST_SIZE :: 64
 
-// hash_string will hash the given input and return the
-// computed hash
-hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
-	return hash_bytes(transmute([]byte)(data))
-}
+// BLOCK_SIZE is the BLAKE2b block size in bytes.
+BLOCK_SIZE :: _blake2.BLAKE2B_BLOCK_SIZE
 
-// hash_bytes will hash the given input and return the
-// computed hash
-hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	cfg: _blake2.Blake2_Config
-	cfg.size = _blake2.BLAKE2B_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer :: proc(data, hash: []byte) {
-	ctx: Context
-	cfg: _blake2.Blake2_Config
-	cfg.size = _blake2.BLAKE2B_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
+// Context is a BLAKE2b instance.
+Context :: _blake2.Blake2b_Context
 
-// hash_stream will read the stream in chunks and compute a
-// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
+// init initializes a Context with the default BLAKE2b config.
+init :: proc(ctx: ^Context) {
 	cfg: _blake2.Blake2_Config
 	cfg.size = _blake2.BLAKE2B_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_blake2.init(ctx, &cfg)
 }
 
-// hash_file will read the file provided by the given handle
-// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
-	if !load_at_once {
-		return hash_stream(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE]byte{}, false
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	_blake2.update(ctx, data)
 }
 
-hash :: proc {
-	hash_stream,
-	hash_file,
-	hash_bytes,
-	hash_string,
-	hash_bytes_to_buffer,
-	hash_string_to_buffer,
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	_blake2.final(ctx, hash, finalize_clone)
 }
 
-/*
-    Low level API
-*/
-
-Context :: _blake2.Blake2b_Context
-
-init :: proc(ctx: ^Context) {
-	_blake2.init(ctx)
-}
-
-update :: proc(ctx: ^Context, data: []byte) {
-	_blake2.update(ctx, data)
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	_blake2.clone(ctx, other)
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
-	_blake2.final(ctx, hash)
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	_blake2.reset(ctx)
 }
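
The `finalize_clone` parameter exists for rolling digests: finalizing a clone leaves the original Context free to keep absorbing. A short sketch of that pattern using the new API (input values are placeholders):

```odin
package blake2b_rolling_sketch

import "core:crypto/blake2b"

main :: proc() {
	ctx: blake2b.Context
	blake2b.init(&ctx)

	digest: [blake2b.DIGEST_SIZE]byte

	part1 := "hello"
	blake2b.update(&ctx, transmute([]byte)(part1))

	// finalize_clone = true finalizes a copy, so ctx keeps absorbing.
	blake2b.final(&ctx, digest[:], true) // Digest of "hello".

	part2 := " world"
	blake2b.update(&ctx, transmute([]byte)(part2))

	// The default finalize_clone = false resets ctx afterwards.
	blake2b.final(&ctx, digest[:]) // Digest of "hello world".
}
```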

+ 32 - 100
core/crypto/blake2s/blake2s.odin

@@ -1,3 +1,10 @@
+/*
+package blake2s implements the BLAKE2s hash algorithm.
+
+See:
+- https://datatracker.ietf.org/doc/html/rfc7693
+- https://www.blake2.net/
+*/
 package blake2s
 
 /*
@@ -6,122 +13,47 @@ package blake2s
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Interface for the BLAKE2s hashing algorithm.
-    BLAKE2s and BLAKE2b share the implementation in the _blake2 package.
 */
 
-import "core:io"
-import "core:os"
-
 import "../_blake2"
 
-/*
-    High level API
-*/
-
+// DIGEST_SIZE is the BLAKE2s digest size in bytes.
 DIGEST_SIZE :: 32
 
-// hash_string will hash the given input and return the
-// computed hash
-hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
-	return hash_bytes(transmute([]byte)(data))
-}
+// BLOCK_SIZE is the BLAKE2s block size in bytes.
+BLOCK_SIZE :: _blake2.BLAKE2S_BLOCK_SIZE
 
-// hash_bytes will hash the given input and return the
-// computed hash
-hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	cfg: _blake2.Blake2_Config
-	cfg.size = _blake2.BLAKE2S_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer :: proc(data, hash: []byte) {
-	ctx: Context
-	cfg: _blake2.Blake2_Config
-	cfg.size = _blake2.BLAKE2S_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
+// Context is a BLAKE2s instance.
+Context :: _blake2.Blake2s_Context
 
-// hash_stream will read the stream in chunks and compute a
-// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
+// init initializes a Context with the default BLAKE2s config.
+init :: proc(ctx: ^Context) {
 	cfg: _blake2.Blake2_Config
 	cfg.size = _blake2.BLAKE2S_SIZE
-	ctx.cfg = cfg
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_blake2.init(ctx, &cfg)
 }
 
-// hash_file will read the file provided by the given handle
-// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
-	if !load_at_once {
-		return hash_stream(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE]byte{}, false
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	_blake2.update(ctx, data)
 }
 
-hash :: proc {
-	hash_stream,
-	hash_file,
-	hash_bytes,
-	hash_string,
-	hash_bytes_to_buffer,
-	hash_string_to_buffer,
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	_blake2.final(ctx, hash, finalize_clone)
 }
 
-/*
-    Low level API
-*/
-
-Context :: _blake2.Blake2s_Context
-
-init :: proc(ctx: ^Context) {
-	_blake2.init(ctx)
-}
-
-update :: proc(ctx: ^Context, data: []byte) {
-	_blake2.update(ctx, data)
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	_blake2.clone(ctx, other)
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
-	_blake2.final(ctx, hash)
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	_blake2.reset(ctx)
 }

+ 62 - 0
core/crypto/hash/doc.odin

@@ -0,0 +1,62 @@
+/*
+package hash provides a generic interface to the supported hash algorithms.
+
+A high-level convenience procedure group `hash` is provided to easily
+accomplish common tasks.
+- `hash_string` - Hash a given string and return the digest.
+- `hash_bytes` - Hash a given byte slice and return the digest.
+- `hash_string_to_buffer` - Hash a given string and put the digest in
+  the third parameter.  It requires that the destination buffer
+  is at least as big as the digest size.
+- `hash_bytes_to_buffer` - Hash a given byte slice and put the computed
+  digest in the third parameter.  It requires that the destination
+  buffer is at least as big as the digest size.
+- `hash_stream` - Fully consume an `io.Stream` in chunks, and return
+  the computed digest.
+- `hash_file` - Takes a file handle and returns the computed digest.
+  A third optional boolean parameter controls whether the file is
+  streamed (the default), or read all at once.
+
+```odin
+package hash_example
+
+import "core:crypto/hash"
+
+main :: proc() {
+	input := "Feed the fire."
+
+	// Compute the digest, using the high level API.
+	returned_digest := hash.hash(hash.Algorithm.SHA512_256, input)
+	defer delete(returned_digest)
+
+	// Variant that takes a destination buffer, instead of returning
+	// the digest.
+	digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.BLAKE2B]) // @note: Destination buffer has to be at least as big as the digest size of the hash.
+	defer delete(digest)
+	hash.hash(hash.Algorithm.BLAKE2B, input, digest)
+}
+```
+
+A generic low level API is provided, supporting the init/update/final
+interface that is typical of cryptographic hash function implementations.
+
+```odin
+package hash_example
+
+import "core:crypto/hash"
+
+main :: proc() {
+	input := "Let the cinders burn."
+
+	// Compute the digest, using the low level API.
+	ctx: hash.Context
+	digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.SHA3_512])
+	defer delete(digest)
+
+	hash.init(&ctx, hash.Algorithm.SHA3_512)
+	hash.update(&ctx, transmute([]byte)input)
+	hash.final(&ctx, digest)
+}
+```
+*/
+package crypto_hash

+ 116 - 0
core/crypto/hash/hash.odin

@@ -0,0 +1,116 @@
+package crypto_hash
+
+/*
+    Copyright 2021 zhibog
+    Made available under the BSD-3 license.
+
+    List of contributors:
+        zhibog, dotbmp:  Initial implementation.
+*/
+
+import "core:io"
+import "core:mem"
+import "core:os"
+
+// hash_string will hash the given input and return the computed digest
+// in a newly allocated slice.
+hash_string :: proc(algorithm: Algorithm, data: string, allocator := context.allocator) -> []byte {
+	return hash_bytes(algorithm, transmute([]byte)(data), allocator)
+}
+
+// hash_bytes will hash the given input and return the computed digest
+// in a newly allocated slice.
+hash_bytes :: proc(algorithm: Algorithm, data: []byte, allocator := context.allocator) -> []byte {
+	dst := make([]byte, DIGEST_SIZES[algorithm], allocator)
+	hash_bytes_to_buffer(algorithm, data, dst)
+	return dst
+}
+
+// hash_string_to_buffer will hash the given input and assign the
+// computed digest to the third parameter.  It requires that the
+// destination buffer is at least as big as the digest size.
+hash_string_to_buffer :: proc(algorithm: Algorithm, data: string, hash: []byte) {
+	hash_bytes_to_buffer(algorithm, transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed digest into the third parameter.  It requires that the
+// destination buffer is at least as big as the digest size.
+hash_bytes_to_buffer :: proc(algorithm: Algorithm, data, hash: []byte) {
+	ctx: Context
+
+	init(&ctx, algorithm)
+	update(&ctx, data)
+	final(&ctx, hash)
+}
+
+// hash_stream will fully consume a stream in chunks, and return the
+// computed digest in a newly allocated slice.
+hash_stream :: proc(
+	algorithm: Algorithm,
+	s: io.Stream,
+	allocator := context.allocator,
+) -> (
+	[]byte,
+	io.Error,
+) {
+	ctx: Context
+
+	buf: [MAX_BLOCK_SIZE * 4]byte
+	defer mem.zero_explicit(&buf, size_of(buf))
+
+	init(&ctx, algorithm)
+
+	loop: for {
+		n, err := io.read(s, buf[:])
+		if n > 0 {
+			// XXX/yawning: Can io.read return n > 0 and EOF?
+			update(&ctx, buf[:n])
+		}
+		#partial switch err {
+		case .None:
+		case .EOF:
+			break loop
+		case:
+			return nil, err
+		}
+	}
+
+	dst := make([]byte, DIGEST_SIZES[algorithm], allocator)
+	final(&ctx, dst)
+
+	return dst, io.Error.None
+}
+
+// hash_file will read the file provided by the given handle and return the
+// computed digest in a newly allocated slice.
+hash_file :: proc(
+	algorithm: Algorithm,
+	hd: os.Handle,
+	load_at_once := false,
+	allocator := context.allocator,
+) -> (
+	[]byte,
+	io.Error,
+) {
+	if !load_at_once {
+		return hash_stream(algorithm, os.stream_from_handle(hd), allocator)
+	}
+
+	buf, ok := os.read_entire_file(hd, allocator)
+	if !ok {
+		return nil, io.Error.Unknown
+	}
+	defer delete(buf, allocator)
+
+	return hash_bytes(algorithm, buf, allocator), io.Error.None
+}
+
+hash :: proc {
+	hash_stream,
+	hash_file,
+	hash_bytes,
+	hash_string,
+	hash_bytes_to_buffer,
+	hash_string_to_buffer,
+}
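
With the `hash` proc group, a file digest is a single call; the handle is streamed through the fixed-size buffer by default. A sketch, where `input.bin` is a hypothetical path:

```odin
package hash_file_sketch

import "core:crypto/hash"
import "core:os"

main :: proc() {
	fd, errno := os.open("input.bin") // hypothetical input file
	if errno != os.ERROR_NONE {
		return
	}
	defer os.close(fd)

	// Streams the file by default and returns a newly allocated digest.
	digest, err := hash.hash(hash.Algorithm.SHA256, fd)
	if err != .None {
		return
	}
	defer delete(digest)
}
```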

+ 353 - 0
core/crypto/hash/low_level.odin

@@ -0,0 +1,353 @@
+package crypto_hash
+
+import "core:crypto/blake2b"
+import "core:crypto/blake2s"
+import "core:crypto/sha2"
+import "core:crypto/sha3"
+import "core:crypto/sm3"
+import "core:crypto/legacy/keccak"
+import "core:crypto/legacy/md5"
+import "core:crypto/legacy/sha1"
+
+import "core:reflect"
+
+// MAX_DIGEST_SIZE is the maximum digest size in bytes that can be
+// returned by any of the Algorithms supported by this package.
+MAX_DIGEST_SIZE :: 64
+// MAX_BLOCK_SIZE is the maximum block size used by any of the Algorithms
+// supported by this package.
+MAX_BLOCK_SIZE :: sha3.BLOCK_SIZE_224
+
+// Algorithm is the algorithm identifier associated with a given Context.
+Algorithm :: enum {
+	Invalid,
+	BLAKE2B,
+	BLAKE2S,
+	SHA224,
+	SHA256,
+	SHA384,
+	SHA512,
+	SHA512_256,
+	SHA3_224,
+	SHA3_256,
+	SHA3_384,
+	SHA3_512,
+	SM3,
+	Legacy_KECCAK_224,
+	Legacy_KECCAK_256,
+	Legacy_KECCAK_384,
+	Legacy_KECCAK_512,
+	Insecure_MD5,
+	Insecure_SHA1,
+}
+
+// ALGORITHM_NAMES maps an Algorithm to its name string.
+ALGORITHM_NAMES := [Algorithm]string {
+	.Invalid           = "Invalid",
+	.BLAKE2B           = "BLAKE2b",
+	.BLAKE2S           = "BLAKE2s",
+	.SHA224            = "SHA-224",
+	.SHA256            = "SHA-256",
+	.SHA384            = "SHA-384",
+	.SHA512            = "SHA-512",
+	.SHA512_256        = "SHA-512/256",
+	.SHA3_224          = "SHA3-224",
+	.SHA3_256          = "SHA3-256",
+	.SHA3_384          = "SHA3-384",
+	.SHA3_512          = "SHA3-512",
+	.SM3               = "SM3",
+	.Legacy_KECCAK_224 = "Keccak-224",
+	.Legacy_KECCAK_256 = "Keccak-256",
+	.Legacy_KECCAK_384 = "Keccak-384",
+	.Legacy_KECCAK_512 = "Keccak-512",
+	.Insecure_MD5      = "MD5",
+	.Insecure_SHA1     = "SHA-1",
+}
+
+// DIGEST_SIZES maps an Algorithm to its digest size in bytes.
+DIGEST_SIZES := [Algorithm]int {
+	.Invalid           = 0,
+	.BLAKE2B           = blake2b.DIGEST_SIZE,
+	.BLAKE2S           = blake2s.DIGEST_SIZE,
+	.SHA224            = sha2.DIGEST_SIZE_224,
+	.SHA256            = sha2.DIGEST_SIZE_256,
+	.SHA384            = sha2.DIGEST_SIZE_384,
+	.SHA512            = sha2.DIGEST_SIZE_512,
+	.SHA512_256        = sha2.DIGEST_SIZE_512_256,
+	.SHA3_224          = sha3.DIGEST_SIZE_224,
+	.SHA3_256          = sha3.DIGEST_SIZE_256,
+	.SHA3_384          = sha3.DIGEST_SIZE_384,
+	.SHA3_512          = sha3.DIGEST_SIZE_512,
+	.SM3               = sm3.DIGEST_SIZE,
+	.Legacy_KECCAK_224 = keccak.DIGEST_SIZE_224,
+	.Legacy_KECCAK_256 = keccak.DIGEST_SIZE_256,
+	.Legacy_KECCAK_384 = keccak.DIGEST_SIZE_384,
+	.Legacy_KECCAK_512 = keccak.DIGEST_SIZE_512,
+	.Insecure_MD5      = md5.DIGEST_SIZE,
+	.Insecure_SHA1     = sha1.DIGEST_SIZE,
+}
+
+// BLOCK_SIZES maps an Algorithm to its block size in bytes.
+BLOCK_SIZES := [Algorithm]int {
+	.Invalid           = 0,
+	.BLAKE2B           = blake2b.BLOCK_SIZE,
+	.BLAKE2S           = blake2s.BLOCK_SIZE,
+	.SHA224            = sha2.BLOCK_SIZE_256,
+	.SHA256            = sha2.BLOCK_SIZE_256,
+	.SHA384            = sha2.BLOCK_SIZE_512,
+	.SHA512            = sha2.BLOCK_SIZE_512,
+	.SHA512_256        = sha2.BLOCK_SIZE_512,
+	.SHA3_224          = sha3.BLOCK_SIZE_224,
+	.SHA3_256          = sha3.BLOCK_SIZE_256,
+	.SHA3_384          = sha3.BLOCK_SIZE_384,
+	.SHA3_512          = sha3.BLOCK_SIZE_512,
+	.SM3               = sm3.BLOCK_SIZE,
+	.Legacy_KECCAK_224 = keccak.BLOCK_SIZE_224,
+	.Legacy_KECCAK_256 = keccak.BLOCK_SIZE_256,
+	.Legacy_KECCAK_384 = keccak.BLOCK_SIZE_384,
+	.Legacy_KECCAK_512 = keccak.BLOCK_SIZE_512,
+	.Insecure_MD5      = md5.BLOCK_SIZE,
+	.Insecure_SHA1     = sha1.BLOCK_SIZE,
+}
+
+// Context is a concrete instantiation of a specific hash algorithm.
+Context :: struct {
+	_algo: Algorithm,
+	_impl: union {
+		blake2b.Context,
+		blake2s.Context,
+		sha2.Context_256,
+		sha2.Context_512,
+		sha3.Context,
+		sm3.Context,
+		keccak.Context,
+		md5.Context,
+		sha1.Context,
+	},
+}
+
+@(private)
+_IMPL_IDS := [Algorithm]typeid {
+	.Invalid           = nil,
+	.BLAKE2B           = typeid_of(blake2b.Context),
+	.BLAKE2S           = typeid_of(blake2s.Context),
+	.SHA224            = typeid_of(sha2.Context_256),
+	.SHA256            = typeid_of(sha2.Context_256),
+	.SHA384            = typeid_of(sha2.Context_512),
+	.SHA512            = typeid_of(sha2.Context_512),
+	.SHA512_256        = typeid_of(sha2.Context_512),
+	.SHA3_224          = typeid_of(sha3.Context),
+	.SHA3_256          = typeid_of(sha3.Context),
+	.SHA3_384          = typeid_of(sha3.Context),
+	.SHA3_512          = typeid_of(sha3.Context),
+	.SM3               = typeid_of(sm3.Context),
+	.Legacy_KECCAK_224 = typeid_of(keccak.Context),
+	.Legacy_KECCAK_256 = typeid_of(keccak.Context),
+	.Legacy_KECCAK_384 = typeid_of(keccak.Context),
+	.Legacy_KECCAK_512 = typeid_of(keccak.Context),
+	.Insecure_MD5      = typeid_of(md5.Context),
+	.Insecure_SHA1     = typeid_of(sha1.Context),
+}
+
+// init initializes a Context with a specific hash Algorithm.
+init :: proc(ctx: ^Context, algorithm: Algorithm) {
+	if ctx._impl != nil {
+		reset(ctx)
+	}
+
+	// Directly specialize the union by setting the type ID (saves a copy).
+	reflect.set_union_variant_typeid(
+		ctx._impl,
+		_IMPL_IDS[algorithm],
+	)
+	switch algorithm {
+	case .BLAKE2B:
+		blake2b.init(&ctx._impl.(blake2b.Context))
+	case .BLAKE2S:
+		blake2s.init(&ctx._impl.(blake2s.Context))
+	case .SHA224:
+		sha2.init_224(&ctx._impl.(sha2.Context_256))
+	case .SHA256:
+		sha2.init_256(&ctx._impl.(sha2.Context_256))
+	case .SHA384:
+		sha2.init_384(&ctx._impl.(sha2.Context_512))
+	case .SHA512:
+		sha2.init_512(&ctx._impl.(sha2.Context_512))
+	case .SHA512_256:
+		sha2.init_512_256(&ctx._impl.(sha2.Context_512))
+	case .SHA3_224:
+		sha3.init_224(&ctx._impl.(sha3.Context))
+	case .SHA3_256:
+		sha3.init_256(&ctx._impl.(sha3.Context))
+	case .SHA3_384:
+		sha3.init_384(&ctx._impl.(sha3.Context))
+	case .SHA3_512:
+		sha3.init_512(&ctx._impl.(sha3.Context))
+	case .SM3:
+		sm3.init(&ctx._impl.(sm3.Context))
+	case .Legacy_KECCAK_224:
+		keccak.init_224(&ctx._impl.(keccak.Context))
+	case .Legacy_KECCAK_256:
+		keccak.init_256(&ctx._impl.(keccak.Context))
+	case .Legacy_KECCAK_384:
+		keccak.init_384(&ctx._impl.(keccak.Context))
+	case .Legacy_KECCAK_512:
+		keccak.init_512(&ctx._impl.(keccak.Context))
+	case .Insecure_MD5:
+		md5.init(&ctx._impl.(md5.Context))
+	case .Insecure_SHA1:
+		sha1.init(&ctx._impl.(sha1.Context))
+	case .Invalid:
+		panic("crypto/hash: uninitialized algorithm")
+	case:
+		panic("crypto/hash: invalid algorithm")
+	}
+
+	ctx._algo = algorithm
+}
+
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	switch &impl in ctx._impl {
+	case blake2b.Context:
+		blake2b.update(&impl, data)
+	case blake2s.Context:
+		blake2s.update(&impl, data)
+	case sha2.Context_256:
+		sha2.update(&impl, data)
+	case sha2.Context_512:
+		sha2.update(&impl, data)
+	case sha3.Context:
+		sha3.update(&impl, data)
+	case sm3.Context:
+		sm3.update(&impl, data)
+	case keccak.Context:
+		keccak.update(&impl, data)
+	case md5.Context:
+		md5.update(&impl, data)
+	case sha1.Context:
+		sha1.update(&impl, data)
+	case:
+		panic("crypto/hash: uninitialized algorithm")
+	}
+}
+
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	switch &impl in ctx._impl {
+	case blake2b.Context:
+		blake2b.final(&impl, hash, finalize_clone)
+	case blake2s.Context:
+		blake2s.final(&impl, hash, finalize_clone)
+	case sha2.Context_256:
+		sha2.final(&impl, hash, finalize_clone)
+	case sha2.Context_512:
+		sha2.final(&impl, hash, finalize_clone)
+	case sha3.Context:
+		sha3.final(&impl, hash, finalize_clone)
+	case sm3.Context:
+		sm3.final(&impl, hash, finalize_clone)
+	case keccak.Context:
+		keccak.final(&impl, hash, finalize_clone)
+	case md5.Context:
+		md5.final(&impl, hash, finalize_clone)
+	case sha1.Context:
+		sha1.final(&impl, hash, finalize_clone)
+	case:
+		panic("crypto/hash: uninitialized algorithm")
+	}
+
+	if !finalize_clone {
+		reset(ctx)
+	}
+}
+
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	// XXX/yawning: Maybe these cases should panic, because both are
+	// probably bugs.
+	if ctx == other {
+		return
+	}
+	if ctx._impl != nil {
+		reset(ctx)
+	}
+
+	ctx._algo = other._algo
+
+	reflect.set_union_variant_typeid(
+		ctx._impl,
+		reflect.union_variant_typeid(other._impl),
+	)
+	switch &src_impl in other._impl {
+	case blake2b.Context:
+		blake2b.clone(&ctx._impl.(blake2b.Context), &src_impl)
+	case blake2s.Context:
+		blake2s.clone(&ctx._impl.(blake2s.Context), &src_impl)
+	case sha2.Context_256:
+		sha2.clone(&ctx._impl.(sha2.Context_256), &src_impl)
+	case sha2.Context_512:
+		sha2.clone(&ctx._impl.(sha2.Context_512), &src_impl)
+	case sha3.Context:
+		sha3.clone(&ctx._impl.(sha3.Context), &src_impl)
+	case sm3.Context:
+		sm3.clone(&ctx._impl.(sm3.Context), &src_impl)
+	case keccak.Context:
+		keccak.clone(&ctx._impl.(keccak.Context), &src_impl)
+	case md5.Context:
+		md5.clone(&ctx._impl.(md5.Context), &src_impl)
+	case sha1.Context:
+		sha1.clone(&ctx._impl.(sha1.Context), &src_impl)
+	case:
+		panic("crypto/hash: uninitialized algorithm")
+	}
+}
+
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	switch &impl in ctx._impl {
+	case blake2b.Context:
+		blake2b.reset(&impl)
+	case blake2s.Context:
+		blake2s.reset(&impl)
+	case sha2.Context_256:
+		sha2.reset(&impl)
+	case sha2.Context_512:
+		sha2.reset(&impl)
+	case sha3.Context:
+		sha3.reset(&impl)
+	case sm3.Context:
+		sm3.reset(&impl)
+	case keccak.Context:
+		keccak.reset(&impl)
+	case md5.Context:
+		md5.reset(&impl)
+	case sha1.Context:
+		sha1.reset(&impl)
+	case:
+	// Unlike clone, calling reset repeatedly is fine.
+	}
+
+	ctx._algo = .Invalid
+	ctx._impl = nil
+}
+
+// algorithm returns the Algorithm used by a Context instance.
+algorithm :: proc(ctx: ^Context) -> Algorithm {
+	return ctx._algo
+}
+
+// digest_size returns the digest size of a Context instance in bytes.
+digest_size :: proc(ctx: ^Context) -> int {
+	return DIGEST_SIZES[ctx._algo]
+}
+
+// block_size returns the block size of a Context instance in bytes.
+block_size :: proc(ctx: ^Context) -> int {
+	return BLOCK_SIZES[ctx._algo]
+}
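
Since the generic `Context` carries its algorithm at runtime, callers can size buffers from the live instance and snapshot state without naming a concrete hash. A sketch combining `digest_size` and `clone`:

```odin
package low_level_sketch

import "core:crypto/hash"

main :: proc() {
	ctx: hash.Context
	hash.init(&ctx, .SHA3_256)

	msg := "generic data"
	hash.update(&ctx, transmute([]byte)(msg))

	// Size the destination from the instance, not a hard-coded constant.
	digest := make([]byte, hash.digest_size(&ctx))
	defer delete(digest)

	// Snapshot the state; both contexts finalize independently.
	snapshot: hash.Context
	hash.clone(&snapshot, &ctx)

	hash.final(&ctx, digest) // Digest of "generic data"; ctx is reset.

	hash.update(&snapshot, transmute([]byte)(msg))
	hash.final(&snapshot, digest) // Digest of "generic datageneric data".
}
```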

+ 162 - 0
core/crypto/hmac/hmac.odin

@@ -0,0 +1,162 @@
+/*
+package hmac implements the HMAC MAC algorithm.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.198-1.pdf
+*/
+package hmac
+
+import "core:crypto"
+import "core:crypto/hash"
+import "core:mem"
+
+// sum will compute the HMAC with the specified algorithm and key
+// over msg, and write the computed digest to dst.  It requires that
+// the dst buffer is exactly the tag size.
+sum :: proc(algorithm: hash.Algorithm, dst, msg, key: []byte) {
+	ctx: Context
+
+	init(&ctx, algorithm, key)
+	update(&ctx, msg)
+	final(&ctx, dst)
+}
+
+// verify will verify the HMAC tag computed with the specified algorithm
+// and key over msg and return true iff the tag is valid.  It requires
+// that the tag is correctly sized.
+verify :: proc(algorithm: hash.Algorithm, tag, msg, key: []byte) -> bool {
+	tag_buf: [hash.MAX_DIGEST_SIZE]byte
+
+	derived_tag := tag_buf[:hash.DIGEST_SIZES[algorithm]]
+	sum(algorithm, derived_tag, msg, key)
+
+	return crypto.compare_constant_time(derived_tag, tag) == 1
+}
+
+// Context is a concrete instantiation of HMAC with a specific hash
+// algorithm.
+Context :: struct {
+	_o_hash:         hash.Context, // H(k ^ opad) (not finalized)
+	_i_hash:         hash.Context, // H(k ^ ipad) (not finalized)
+	_tag_sz:         int,
+	_is_initialized: bool,
+}
+
+// init initializes a Context with a specific hash Algorithm and key.
+init :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
+	if ctx._is_initialized {
+		reset(ctx)
+	}
+
+	_init_hashes(ctx, algorithm, key)
+
+	ctx._tag_sz = hash.DIGEST_SIZES[algorithm]
+	ctx._is_initialized = true
+}
+
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	assert(ctx._is_initialized)
+
+	hash.update(&ctx._i_hash, data)
+}
+
+// final finalizes the Context, writes the tag to dst, and calls
+// reset on the Context.
+final :: proc(ctx: ^Context, dst: []byte) {
+	assert(ctx._is_initialized)
+
+	defer (reset(ctx))
+
+	if len(dst) != ctx._tag_sz {
+		panic("crypto/hmac: invalid destination tag size")
+	}
+
+	hash.final(&ctx._i_hash, dst) // H((k ^ ipad) || text)
+
+	hash.update(&ctx._o_hash, dst) // H((k ^ opad) || H((k ^ ipad) || text))
+	hash.final(&ctx._o_hash, dst)
+}
+
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	if !ctx._is_initialized {
+		return
+	}
+
+	hash.reset(&ctx._o_hash)
+	hash.reset(&ctx._i_hash)
+	ctx._tag_sz = 0
+	ctx._is_initialized = false
+}
+
+// algorithm returns the Algorithm used by a Context instance.
+algorithm :: proc(ctx: ^Context) -> hash.Algorithm {
+	assert(ctx._is_initialized)
+
+	return hash.algorithm(&ctx._i_hash)
+}
+
+// tag_size returns the tag size of a Context instance in bytes.
+tag_size :: proc(ctx: ^Context) -> int {
+	assert(ctx._is_initialized)
+
+	return ctx._tag_sz
+}
+
+@(private)
+_I_PAD :: 0x36
+_O_PAD :: 0x5c
+
+@(private)
+_init_hashes :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
+	K0_buf: [hash.MAX_BLOCK_SIZE]byte
+	kPad_buf: [hash.MAX_BLOCK_SIZE]byte
+
+	kLen := len(key)
+	B := hash.BLOCK_SIZES[algorithm]
+	K0 := K0_buf[:B]
+	defer mem.zero_explicit(raw_data(K0), B)
+
+	switch {
+	case kLen == B, kLen < B:
+		// If the length of K = B: set K0 = K.
+		//
+		// If the length of K < B: append zeros to the end of K to
+		// create a B-byte string K0 (e.g., if K is 20 bytes in
+		// length and B = 64, then K will be appended with 44 zero
+		// bytes x’00’).
+		//
+		// K0 is zero-initialized, so the copy handles both cases.
+		copy(K0, key)
+	case kLen > B:
+		// If the length of K > B: hash K to obtain an L byte string,
+		// then append (B-L) zeros to create a B-byte string K0
+		// (i.e., K0 = H(K) || 00...00).
+		tmpCtx := &ctx._o_hash // Saves allocating a hash.Context.
+		hash.init(tmpCtx, algorithm)
+		hash.update(tmpCtx, key)
+		hash.final(tmpCtx, K0)
+	}
+
+	// Initialize the hashes, and write the padded keys:
+	// - ctx._i_hash -> H(K0 ^ ipad)
+	// - ctx._o_hash -> H(K0 ^ opad)
+
+	hash.init(&ctx._o_hash, algorithm)
+	hash.init(&ctx._i_hash, algorithm)
+
+	kPad := kPad_buf[:B]
+	defer mem.zero_explicit(raw_data(kPad), B)
+
+	for v, i in K0 {
+		kPad[i] = v ~ _I_PAD
+	}
+	hash.update(&ctx._i_hash, kPad)
+
+	for v, i in K0 {
+		kPad[i] = v ~ _O_PAD
+	}
+	hash.update(&ctx._o_hash, kPad)
+}
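
For one-shot use, the `sum`/`verify` pair is sufficient, and `verify` compares against the freshly derived tag in constant time. A sketch with placeholder key and message values:

```odin
package hmac_sketch

import "core:crypto/hash"
import "core:crypto/hmac"

main :: proc() {
	key := "placeholder key material" // hypothetical; use a real secret
	msg := "attack at dawn"

	// dst must be exactly the tag size for the chosen algorithm.
	tag := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.SHA256])
	defer delete(tag)

	hmac.sum(.SHA256, tag, transmute([]byte)(msg), transmute([]byte)(key))

	// Recomputes the tag and compares in constant time.
	ok := hmac.verify(.SHA256, tag, transmute([]byte)(msg), transmute([]byte)(key))
	assert(ok)
}
```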

+ 54 - 336
core/crypto/legacy/keccak/keccak.odin

@@ -1,3 +1,11 @@
+/*
+package keccak implements the Keccak hash algorithm family.
+
+During the SHA-3 standardization process, the padding scheme was changed,
+thus Keccak and SHA-3 produce different outputs.  Most users should use
+SHA-3 and/or SHAKE instead; the legacy algorithm is provided for
+backward-compatibility purposes.
+*/
 package keccak
 
 /*
@@ -6,372 +14,82 @@ package keccak
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Interface for the Keccak hashing algorithm.
-    This is done because the padding in the SHA3 standard was changed by the NIST, resulting in a different output.
 */
 
-import "core:io"
-import "core:os"
-
 import "../../_sha3"
 
-/*
-    High level API
-*/
-
+// DIGEST_SIZE_224 is the Keccak-224 digest size in bytes.
 DIGEST_SIZE_224 :: 28
+// DIGEST_SIZE_256 is the Keccak-256 digest size in bytes.
 DIGEST_SIZE_256 :: 32
+// DIGEST_SIZE_384 is the Keccak-384 digest size in bytes.
 DIGEST_SIZE_384 :: 48
+// DIGEST_SIZE_512 is the Keccak-512 digest size in bytes.
 DIGEST_SIZE_512 :: 64
 
-// hash_string_224 will hash the given input and return the
-// computed hash
-hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
-	return hash_bytes_224(transmute([]byte)(data))
-}
+// BLOCK_SIZE_224 is the Keccak-224 block size in bytes.
+BLOCK_SIZE_224 :: _sha3.RATE_224
+// BLOCK_SIZE_256 is the Keccak-256 block size in bytes.
+BLOCK_SIZE_256 :: _sha3.RATE_256
+// BLOCK_SIZE_384 is the Keccak-384 block size in bytes.
+BLOCK_SIZE_384 :: _sha3.RATE_384
+// BLOCK_SIZE_512 is the Keccak-512 block size in bytes.
+BLOCK_SIZE_512 :: _sha3.RATE_512
 
-// hash_bytes_224 will hash the given input and return the
-// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_224
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_224 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_224 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_224
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
+// Context is a Keccak instance.
+Context :: distinct _sha3.Context
 
-// hash_stream_224 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context
+// init_224 initializes a Context for Keccak-224.
+init_224 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_224
-	ctx.is_keccak = true
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_init(ctx)
 }
 
-// hash_file_224 will read the file provided by the given handle
-// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
-	if !load_at_once {
-		return hash_stream_224(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_224(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_224]byte{}, false
-}
-
-hash_224 :: proc {
-	hash_stream_224,
-	hash_file_224,
-	hash_bytes_224,
-	hash_string_224,
-	hash_bytes_to_buffer_224,
-	hash_string_to_buffer_224,
-}
-
-// hash_string_256 will hash the given input and return the
-// computed hash
-hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
-	return hash_bytes_256(transmute([]byte)(data))
-}
-
-// hash_bytes_256 will hash the given input and return the
-// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
+// init_256 initializes a Context for Keccak-256.
+init_256 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_256
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_256 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_256 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_256 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	ctx.is_keccak = true
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_256 will read the file provided by the given handle
-// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
-	if !load_at_once {
-		return hash_stream_256(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_256(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_256]byte{}, false
+	_init(ctx)
 }
 
-hash_256 :: proc {
-	hash_stream_256,
-	hash_file_256,
-	hash_bytes_256,
-	hash_string_256,
-	hash_bytes_to_buffer_256,
-	hash_string_to_buffer_256,
-}
-
-// hash_string_384 will hash the given input and return the
-// computed hash
-hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
-	return hash_bytes_384(transmute([]byte)(data))
-}
-
-// hash_bytes_384 will hash the given input and return the
-// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context
+// init_384 initializes a Context for Keccak-384.
+init_384 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_384
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_384 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_384 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_384
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_384 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_384
-	ctx.is_keccak = true
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_384 will read the file provided by the given handle
-// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
-	if !load_at_once {
-		return hash_stream_384(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_384(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_384]byte{}, false
-}
-
-hash_384 :: proc {
-	hash_stream_384,
-	hash_file_384,
-	hash_bytes_384,
-	hash_string_384,
-	hash_bytes_to_buffer_384,
-	hash_string_to_buffer_384,
+	_init(ctx)
 }
 
-// hash_string_512 will hash the given input and return the
-// computed hash
-hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
-	return hash_bytes_512(transmute([]byte)(data))
-}
-
-// hash_bytes_512 will hash the given input and return the
-// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context
+// init_512 initializes a Context for Keccak-512.
+init_512 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_512
-	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
+	_init(ctx)
 }
 
-// hash_string_to_buffer_512 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_512 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_512
+@(private)
+_init :: proc(ctx: ^Context) {
 	ctx.is_keccak = true
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
+	_sha3.init(transmute(^_sha3.Context)(ctx))
 }
 
-// hash_stream_512 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_512
-	ctx.is_keccak = true
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_512 will read the file provided by the given handle
-// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
-	if !load_at_once {
-		return hash_stream_512(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_512(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_512]byte{}, false
-}
-
-hash_512 :: proc {
-	hash_stream_512,
-	hash_file_512,
-	hash_bytes_512,
-	hash_string_512,
-	hash_bytes_to_buffer_512,
-	hash_string_to_buffer_512,
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	_sha3.update(transmute(^_sha3.Context)(ctx), data)
 }
 
-/*
-    Low level API
-*/
-
-Context :: _sha3.Sha3_Context
-
-init :: proc(ctx: ^Context) {
-	ctx.is_keccak = true
-	_sha3.init(ctx)
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	_sha3.final(transmute(^_sha3.Context)(ctx), hash, finalize_clone)
 }
 
-update :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(ctx, data)
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
-	_sha3.final(ctx, hash)
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	_sha3.reset(transmute(^_sha3.Context)(ctx))
 }
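
The `init_*` procedures replace hand-setting `mdlen` and `is_keccak` on the context. A sketch of Keccak-256, the variant most commonly needed for interoperability:

```odin
package keccak_sketch

import "core:crypto/legacy/keccak"

main :: proc() {
	ctx: keccak.Context
	keccak.init_256(&ctx)

	msg := "legacy input"
	keccak.update(&ctx, transmute([]byte)(msg))

	digest: [keccak.DIGEST_SIZE_256]byte
	keccak.final(&ctx, digest[:]) // ctx is reset; re-init before reuse.
}
```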

+ 50 - 98
core/crypto/legacy/md5/md5.odin

@@ -1,3 +1,13 @@
+/*
+package md5 implements the MD5 hash algorithm.
+
+WARNING: The MD5 algorithm is known to be insecure and should only be
+used for interoperating with legacy applications.
+
+See:
+- https://eprint.iacr.org/2005/075
+- https://datatracker.ietf.org/doc/html/rfc1321
+*/
 package md5
 
 /*
@@ -6,103 +16,29 @@ package md5
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Implementation of the MD5 hashing algorithm, as defined in RFC 1321 <https://datatracker.ietf.org/doc/html/rfc1321>
 */
 
 import "core:encoding/endian"
-import "core:io"
 import "core:math/bits"
 import "core:mem"
-import "core:os"
-
-/*
-    High level API
-*/
 
+// DIGEST_SIZE is the MD5 digest size in bytes.
 DIGEST_SIZE :: 16
 
-// hash_string will hash the given input and return the
-// computed hash
-hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
-	return hash_bytes(transmute([]byte)(data))
-}
-
-// hash_bytes will hash the given input and return the
-// computed hash
-hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer :: proc(data, hash: []byte) {
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream will read the stream in chunks and compute a
-// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
+// BLOCK_SIZE is the MD5 block size in bytes.
+BLOCK_SIZE :: 64
 
-// hash_file will read the file provided by the given handle
-// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
-	if !load_at_once {
-		return hash_stream(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE]byte{}, false
-}
+// Context is an MD5 instance.
+Context :: struct {
+	data:    [BLOCK_SIZE]byte,
+	state:   [4]u32,
+	bitlen:  u64,
+	datalen: u32,
 
-hash :: proc {
-	hash_stream,
-	hash_file,
-	hash_bytes,
-	hash_string,
-	hash_bytes_to_buffer,
-	hash_string_to_buffer,
+	is_initialized: bool,
 }
 
-/*
-    Low level API
-*/
-
+// init initializes a Context.
 init :: proc(ctx: ^Context) {
 	ctx.state[0] = 0x67452301
 	ctx.state[1] = 0xefcdab89
@@ -115,6 +51,7 @@ init :: proc(ctx: ^Context) {
 	ctx.is_initialized = true
 }
 
+// update adds more data to the Context.
 update :: proc(ctx: ^Context, data: []byte) {
 	assert(ctx.is_initialized)
 
@@ -129,13 +66,26 @@ update :: proc(ctx: ^Context, data: []byte) {
 	}
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
 	assert(ctx.is_initialized)
 
 	if len(hash) < DIGEST_SIZE {
 		panic("crypto/md5: invalid destination digest size")
 	}
 
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: Context
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
 	i := ctx.datalen
 
 	if ctx.datalen < 56 {
@@ -163,25 +113,27 @@ final :: proc(ctx: ^Context, hash: []byte) {
 	for i = 0; i < DIGEST_SIZE / 4; i += 1 {
 		endian.unchecked_put_u32le(hash[i * 4:], ctx.state[i])
 	}
+}
 
-	ctx.is_initialized = false
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^$T) {
+	ctx^ = other^
+}
+
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^$T) {
+	if !ctx.is_initialized {
+		return
+	}
+
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
 /*
     MD5 implementation
 */
 
-BLOCK_SIZE :: 64
-
-Context :: struct {
-	data:    [BLOCK_SIZE]byte,
-	state:   [4]u32,
-	bitlen:  u64,
-	datalen: u32,
-
-	is_initialized: bool,
-}
-
 /*
     @note(zh): F, G, H and I, as mentioned in the RFC, have been inlined into FF, GG, HH
     and II respectively, instead of declaring them separately.

+ 52 - 99
core/crypto/legacy/sha1/sha1.odin

@@ -1,3 +1,14 @@
+/*
+package sha1 implements the SHA1 hash algorithm.
+
+WARNING: The SHA1 algorithm is known to be insecure and should only be
+used for interoperating with legacy applications.
+
+See:
+- https://eprint.iacr.org/2017/190
+- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
+- https://datatracker.ietf.org/doc/html/rfc3174
+*/
 package sha1
 
 /*
@@ -6,103 +17,30 @@ package sha1
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Implementation of the SHA1 hashing algorithm, as defined in RFC 3174 <https://datatracker.ietf.org/doc/html/rfc3174>
 */
 
 import "core:encoding/endian"
-import "core:io"
 import "core:math/bits"
 import "core:mem"
-import "core:os"
-
-/*
-    High level API
-*/
 
+// DIGEST_SIZE is the SHA1 digest size in bytes.
 DIGEST_SIZE :: 20
 
-// hash_string will hash the given input and return the
-// computed hash
-hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
-	return hash_bytes(transmute([]byte)(data))
-}
-
-// hash_bytes will hash the given input and return the
-// computed hash
-hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer :: proc(data, hash: []byte) {
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream will read the stream in chunks and compute a
-// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
+// BLOCK_SIZE is the SHA1 block size in bytes.
+BLOCK_SIZE :: 64
 
-// hash_file will read the file provided by the given handle
-// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
-	if !load_at_once {
-		return hash_stream(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE]byte{}, false
-}
+// Context is a SHA1 instance.
+Context :: struct {
+	data:    [BLOCK_SIZE]byte,
+	state:   [5]u32,
+	k:       [4]u32,
+	bitlen:  u64,
+	datalen: u32,
 
-hash :: proc {
-	hash_stream,
-	hash_file,
-	hash_bytes,
-	hash_string,
-	hash_bytes_to_buffer,
-	hash_string_to_buffer,
+	is_initialized: bool,
 }
 
-/*
-    Low level API
-*/
-
+// init initializes a Context.
 init :: proc(ctx: ^Context) {
 	ctx.state[0] = 0x67452301
 	ctx.state[1] = 0xefcdab89
@@ -120,6 +58,7 @@ init :: proc(ctx: ^Context) {
 	ctx.is_initialized = true
 }
 
+// update adds more data to the Context.
 update :: proc(ctx: ^Context, data: []byte) {
 	assert(ctx.is_initialized)
 
@@ -134,13 +73,26 @@ update :: proc(ctx: ^Context, data: []byte) {
 	}
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
 	assert(ctx.is_initialized)
 
 	if len(hash) < DIGEST_SIZE {
 		panic("crypto/sha1: invalid destination digest size")
 	}
 
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: Context
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
 	i := ctx.datalen
 
 	if ctx.datalen < 56 {
@@ -168,26 +120,27 @@ final :: proc(ctx: ^Context, hash: []byte) {
 	for i = 0; i < DIGEST_SIZE / 4; i += 1 {
 		endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i])
 	}
+}
+
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^$T) {
+	ctx^ = other^
+}
+
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^$T) {
+	if !ctx.is_initialized {
+		return
+	}
 
-	ctx.is_initialized = false
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
 /*
     SHA1 implementation
 */
 
-BLOCK_SIZE :: 64
-
-Context :: struct {
-	data:    [BLOCK_SIZE]byte,
-	datalen: u32,
-	bitlen:  u64,
-	state:   [5]u32,
-	k:       [4]u32,
-
-	is_initialized: bool,
-}
-
 @(private)
 transform :: proc "contextless" (ctx: ^Context, data: []byte) {
 	a, b, c, d, e, i, t: u32

+ 0 - 4
core/crypto/poly1305/poly1305.odin

@@ -23,10 +23,6 @@ verify :: proc (tag, msg, key: []byte) -> bool {
 	ctx: Context = ---
 	derived_tag: [16]byte = ---
 
-	if len(tag) != TAG_SIZE {
-		panic("crypto/poly1305: invalid tag size")
-	}
-
 	init(&ctx, key)
 	update(&ctx, msg)
 	final(&ctx, derived_tag[:])

+ 88 - 425
core/crypto/sha2/sha2.odin

@@ -1,3 +1,10 @@
+/*
+package sha2 implements the SHA2 hash algorithm family.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
+- https://datatracker.ietf.org/doc/html/rfc3874
+*/
 package sha2
 
 /*
@@ -6,431 +13,83 @@ package sha2
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Implementation of the SHA2 hashing algorithm, as defined in <https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf>
-    and in RFC 3874 <https://datatracker.ietf.org/doc/html/rfc3874>
 */
 
 import "core:encoding/endian"
-import "core:io"
 import "core:math/bits"
-import "core:os"
-
-/*
-    High level API
-*/
+import "core:mem"
 
+// DIGEST_SIZE_224 is the SHA-224 digest size in bytes.
 DIGEST_SIZE_224 :: 28
+// DIGEST_SIZE_256 is the SHA-256 digest size in bytes.
 DIGEST_SIZE_256 :: 32
+// DIGEST_SIZE_384 is the SHA-384 digest size in bytes.
 DIGEST_SIZE_384 :: 48
+// DIGEST_SIZE_512 is the SHA-512 digest size in bytes.
 DIGEST_SIZE_512 :: 64
+// DIGEST_SIZE_512_256 is the SHA-512/256 digest size in bytes.
 DIGEST_SIZE_512_256 :: 32
 
-// hash_string_224 will hash the given input and return the
-// computed hash
-hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
-	return hash_bytes_224(transmute([]byte)(data))
-}
-
-// hash_bytes_224 will hash the given input and return the
-// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context_256
-	ctx.md_bits = 224
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_224 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_224 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
-	ctx: Context_256
-	ctx.md_bits = 224
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_224 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context_256
-	ctx.md_bits = 224
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
+// BLOCK_SIZE_256 is the SHA-224 and SHA-256 block size in bytes.
+BLOCK_SIZE_256 :: 64
+// BLOCK_SIZE_512 is the SHA-384, SHA-512, and SHA-512/256 block size
+// in bytes.
+BLOCK_SIZE_512 :: 128
 
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_224 will read the file provided by the given handle
-// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
-	if !load_at_once {
-		return hash_stream_224(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_224(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_224]byte{}, false
-}
-
-hash_224 :: proc {
-	hash_stream_224,
-	hash_file_224,
-	hash_bytes_224,
-	hash_string_224,
-	hash_bytes_to_buffer_224,
-	hash_string_to_buffer_224,
-}
+// Context_256 is a SHA-224 or SHA-256 instance.
+Context_256 :: struct {
+	block:     [BLOCK_SIZE_256]byte,
+	h:         [8]u32,
+	bitlength: u64,
+	length:    u64,
+	md_bits:   int,
 
-// hash_string_256 will hash the given input and return the
-// computed hash
-hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
-	return hash_bytes_256(transmute([]byte)(data))
+	is_initialized: bool,
 }
 
-// hash_bytes_256 will hash the given input and return the
-// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context_256
-	ctx.md_bits = 256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
+// Context_512 is a SHA-384, SHA-512, or SHA-512/256 instance.
+Context_512 :: struct {
+	block:     [BLOCK_SIZE_512]byte,
+	h:         [8]u64,
+	bitlength: u64,
+	length:    u64,
+	md_bits:   int,
 
-// hash_string_to_buffer_256 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+	is_initialized: bool,
 }
 
-// hash_bytes_to_buffer_256 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
-	ctx: Context_256
-	ctx.md_bits = 256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
+// init_224 initializes a Context_256 for SHA-224.
+init_224 :: proc(ctx: ^Context_256) {
+	ctx.md_bits = 224
+	_init(ctx)
 }
 
-// hash_stream_256 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context_256
+// init_256 initializes a Context_256 for SHA-256.
+init_256 :: proc(ctx: ^Context_256) {
 	ctx.md_bits = 256
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_init(ctx)
 }
 
-// hash_file_256 will read the file provided by the given handle
-// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
-	if !load_at_once {
-		return hash_stream_256(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_256(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_256]byte{}, false
-}
-
-hash_256 :: proc {
-	hash_stream_256,
-	hash_file_256,
-	hash_bytes_256,
-	hash_string_256,
-	hash_bytes_to_buffer_256,
-	hash_string_to_buffer_256,
-}
-
-// hash_string_384 will hash the given input and return the
-// computed hash
-hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
-	return hash_bytes_384(transmute([]byte)(data))
-}
-
-// hash_bytes_384 will hash the given input and return the
-// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context_512
-	ctx.md_bits = 384
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_384 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_384 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
-	ctx: Context_512
-	ctx.md_bits = 384
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_384 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context_512
+// init_384 initializes a Context_512 for SHA-384.
+init_384 :: proc(ctx: ^Context_512) {
 	ctx.md_bits = 384
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_384 will read the file provided by the given handle
-// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
-	if !load_at_once {
-		return hash_stream_384(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_384(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_384]byte{}, false
+	_init(ctx)
 }
 
-hash_384 :: proc {
-	hash_stream_384,
-	hash_file_384,
-	hash_bytes_384,
-	hash_string_384,
-	hash_bytes_to_buffer_384,
-	hash_string_to_buffer_384,
-}
-
-// hash_string_512 will hash the given input and return the
-// computed hash
-hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
-	return hash_bytes_512(transmute([]byte)(data))
-}
-
-// hash_bytes_512 will hash the given input and return the
-// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context_512
-	ctx.md_bits = 512
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_512 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_512 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
-	ctx: Context_512
+// init_512 initializes a Context_512 for SHA-512.
+init_512 :: proc(ctx: ^Context_512) {
 	ctx.md_bits = 512
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
+	_init(ctx)
 }
 
-// hash_stream_512 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context_512
-	ctx.md_bits = 512
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_512 will read the file provided by the given handle
-// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
-	if !load_at_once {
-		return hash_stream_512(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_512(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_512]byte{}, false
-}
-
-hash_512 :: proc {
-	hash_stream_512,
-	hash_file_512,
-	hash_bytes_512,
-	hash_string_512,
-	hash_bytes_to_buffer_512,
-	hash_string_to_buffer_512,
-}
-
-// hash_string_512_256 will hash the given input and return the
-// computed hash
-hash_string_512_256 :: proc(data: string) -> [DIGEST_SIZE_512_256]byte {
-	return hash_bytes_512_256(transmute([]byte)(data))
-}
-
-// hash_bytes_512_256 will hash the given input and return the
-// computed hash
-hash_bytes_512_256 :: proc(data: []byte) -> [DIGEST_SIZE_512_256]byte {
-	hash: [DIGEST_SIZE_512_256]byte
-	ctx: Context_512
-	ctx.md_bits = 256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_512_256 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_512_256 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_512_256(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_512_256 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_512_256 :: proc(data, hash: []byte) {
-	ctx: Context_512
-	ctx.md_bits = 256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_512_256 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_512_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512_256]byte, bool) {
-	hash: [DIGEST_SIZE_512_256]byte
-	ctx: Context_512
+// init_512_256 initializes a Context_512 for SHA-512/256.
+init_512_256 :: proc(ctx: ^Context_512) {
 	ctx.md_bits = 256
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_init(ctx)
 }
 
-// hash_file_512_256 will read the file provided by the given handle
-// and compute a hash
-hash_file_512_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512_256]byte, bool) {
-	if !load_at_once {
-		return hash_stream_512_256(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_512_256(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_512_256]byte{}, false
-}
-
-hash_512_256 :: proc {
-	hash_stream_512_256,
-	hash_file_512_256,
-	hash_bytes_512_256,
-	hash_string_512_256,
-	hash_bytes_to_buffer_512_256,
-	hash_string_to_buffer_512_256,
-}
-
-/*
-    Low level API
-*/
-
-init :: proc(ctx: ^$T) {
+@(private)
+_init :: proc(ctx: ^$T) {
 	when T == Context_256 {
 		switch ctx.md_bits {
 		case 224:
@@ -497,13 +156,14 @@ init :: proc(ctx: ^$T) {
 	ctx.is_initialized = true
 }
 
+// update adds more data to the Context.
 update :: proc(ctx: ^$T, data: []byte) {
 	assert(ctx.is_initialized)
 
 	when T == Context_256 {
-		CURR_BLOCK_SIZE :: SHA256_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_256
 	} else when T == Context_512 {
-		CURR_BLOCK_SIZE :: SHA512_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_512
 	}
 
 	data := data
@@ -528,21 +188,34 @@ update :: proc(ctx: ^$T, data: []byte) {
 	}
 }
 
-final :: proc(ctx: ^$T, hash: []byte) {
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^$T, hash: []byte, finalize_clone: bool = false) {
 	assert(ctx.is_initialized)
 
 	if len(hash) * 8 < ctx.md_bits {
 		panic("crypto/sha2: invalid destination digest size")
 	}
 
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: T
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
 	length := ctx.length
 
-	raw_pad: [SHA512_BLOCK_SIZE]byte
+	raw_pad: [BLOCK_SIZE_512]byte
 	when T == Context_256 {
-		CURR_BLOCK_SIZE :: SHA256_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_256
 		pm_len := 8 // 64-bits for length
 	} else when T == Context_512 {
-		CURR_BLOCK_SIZE :: SHA512_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_512
 		pm_len := 16 // 128-bits for length
 	}
 	pad := raw_pad[:CURR_BLOCK_SIZE]
@@ -576,37 +249,27 @@ final :: proc(ctx: ^$T, hash: []byte) {
 			endian.unchecked_put_u64be(hash[i * 8:], ctx.h[i])
 		}
 	}
-
-	ctx.is_initialized = false
 }
 
-/*
-    SHA2 implementation
-*/
-
-SHA256_BLOCK_SIZE :: 64
-SHA512_BLOCK_SIZE :: 128
-
-Context_256 :: struct {
-	block:     [SHA256_BLOCK_SIZE]byte,
-	h:         [8]u32,
-	bitlength: u64,
-	length:    u64,
-	md_bits:   int,
-
-	is_initialized: bool,
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^$T) {
+	ctx^ = other^
 }
 
-Context_512 :: struct {
-	block:     [SHA512_BLOCK_SIZE]byte,
-	h:         [8]u64,
-	bitlength: u64,
-	length:    u64,
-	md_bits:   int,
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^$T) {
+	if !ctx.is_initialized {
+		return
+	}
 
-	is_initialized: bool,
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
+/*
+    SHA2 implementation
+*/
+
 @(private)
 sha256_k := [64]u32 {
 	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
@@ -737,12 +400,12 @@ sha2_transf :: proc "contextless" (ctx: ^$T, data: []byte) {
 		w: [64]u32
 		wv: [8]u32
 		t1, t2: u32
-		CURR_BLOCK_SIZE :: SHA256_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_256
 	} else when T == Context_512 {
 		w: [80]u64
 		wv: [8]u64
 		t1, t2: u64
-		CURR_BLOCK_SIZE :: SHA512_BLOCK_SIZE
+		CURR_BLOCK_SIZE :: BLOCK_SIZE_512
 	}
 
 	data := data

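With the one-shot hash_* helpers removed above, callers now drive the streaming API directly. A minimal sketch of the new sha2 interface (the input and the use of core:fmt for printing are illustrative, not part of this change):

	package example

	import "core:crypto/sha2"
	import "core:fmt"

	main :: proc() {
		msg := "hello"

		ctx: sha2.Context_256
		sha2.init_256(&ctx)                     // select SHA-256 parameters
		sha2.update(&ctx, transmute([]byte)msg) // absorb input, repeatable

		digest: [sha2.DIGEST_SIZE_256]byte
		sha2.final(&ctx, digest[:])             // write digest, then reset ctx

		fmt.printf("%x\n", digest)
	}

Passing finalize_clone = true to final instead finalizes a copy, leaving ctx usable for further update calls (a rolling digest).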
+ 56 - 324
core/crypto/sha3/sha3.odin

@@ -1,3 +1,13 @@
+/*
+package sha3 implements the SHA3 hash algorithm family.
+
+The SHAKE XOF can be found in crypto/shake.  While discouraged, if the
+pre-standardization Keccak algorithm is required, it can be found in
+crypto/legacy/keccak.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf
+*/
 package sha3
 
 /*
@@ -6,359 +16,81 @@ package sha3
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Interface for the SHA3 hashing algorithm. The SHAKE functionality can be found in package shake.
-    If you wish to compute a Keccak hash, you can use the keccak package, it will use the original padding.
 */
 
-import "core:io"
-import "core:os"
-
 import "../_sha3"
 
-/*
-    High level API
-*/
-
+// DIGEST_SIZE_224 is the SHA3-224 digest size in bytes.
 DIGEST_SIZE_224 :: 28
+// DIGEST_SIZE_256 is the SHA3-256 digest size in bytes.
 DIGEST_SIZE_256 :: 32
+// DIGEST_SIZE_384 is the SHA3-384 digest size in bytes.
 DIGEST_SIZE_384 :: 48
+// DIGEST_SIZE_512 is the SHA3-512 digest size in bytes.
 DIGEST_SIZE_512 :: 64
 
-// hash_string_224 will hash the given input and return the
-// computed hash
-hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
-	return hash_bytes_224(transmute([]byte)(data))
-}
+// BLOCK_SIZE_224 is the SHA3-224 block size in bytes.
+BLOCK_SIZE_224 :: _sha3.RATE_224
+// BLOCK_SIZE_256 is the SHA3-256 block size in bytes.
+BLOCK_SIZE_256 :: _sha3.RATE_256
+// BLOCK_SIZE_384 is the SHA3-384 block size in bytes.
+BLOCK_SIZE_384 :: _sha3.RATE_384
+// BLOCK_SIZE_512 is the SHA3-512 block size in bytes.
+BLOCK_SIZE_512 :: _sha3.RATE_512
 
-// hash_bytes_224 will hash the given input and return the
-// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_224
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
+// Context is a SHA3 instance.
+Context :: distinct _sha3.Context
 
-// hash_string_to_buffer_224 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_224 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
-	ctx: Context
+// init_224 initializes a Context for SHA3-224.
+init_224 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_224
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_224 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
-	hash: [DIGEST_SIZE_224]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_224
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_init(ctx)
 }
 
-// hash_file_224 will read the file provided by the given handle
-// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
-	if !load_at_once {
-		return hash_stream_224(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_224(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_224]byte{}, false
-}
-
-hash_224 :: proc {
-	hash_stream_224,
-	hash_file_224,
-	hash_bytes_224,
-	hash_string_224,
-	hash_bytes_to_buffer_224,
-	hash_string_to_buffer_224,
-}
-
-// hash_string_256 will hash the given input and return the
-// computed hash
-hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
-	return hash_bytes_256(transmute([]byte)(data))
-}
-
-// hash_bytes_256 will hash the given input and return the
-// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_256 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_256 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_256 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
+// init_256 initializes a Context for SHA3-256.
+init_256 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_256 will read the file provided by the given handle
-// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
-	if !load_at_once {
-		return hash_stream_256(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_256(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_256]byte{}, false
+	_init(ctx)
 }
 
-hash_256 :: proc {
-	hash_stream_256,
-	hash_file_256,
-	hash_bytes_256,
-	hash_string_256,
-	hash_bytes_to_buffer_256,
-	hash_string_to_buffer_256,
-}
-
-// hash_string_384 will hash the given input and return the
-// computed hash
-hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
-	return hash_bytes_384(transmute([]byte)(data))
-}
-
-// hash_bytes_384 will hash the given input and return the
-// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_384
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_384 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_384 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
-	ctx: Context
+// init_384 initializes a Context for SHA3-384.
+init_384 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_384
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_384 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
-	hash: [DIGEST_SIZE_384]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_384
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_384 will read the file provided by the given handle
-// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
-	if !load_at_once {
-		return hash_stream_384(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_384(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_384]byte{}, false
-}
-
-hash_384 :: proc {
-	hash_stream_384,
-	hash_file_384,
-	hash_bytes_384,
-	hash_string_384,
-	hash_bytes_to_buffer_384,
-	hash_string_to_buffer_384,
+	_init(ctx)
 }
 
-// hash_string_512 will hash the given input and return the
-// computed hash
-hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
-	return hash_bytes_512(transmute([]byte)(data))
-}
-
-// hash_bytes_512 will hash the given input and return the
-// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_512
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_512 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_512 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_512
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_512 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
-	hash: [DIGEST_SIZE_512]byte
-	ctx: Context
+// init_512 initializes a Context for SHA3-512.
+init_512 :: proc(ctx: ^Context) {
 	ctx.mdlen = DIGEST_SIZE_512
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+	_init(ctx)
 }
 
-// hash_file_512 will read the file provided by the given handle
-// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
-	if !load_at_once {
-		return hash_stream_512(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_512(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_512]byte{}, false
+@(private)
+_init :: proc(ctx: ^Context) {
+	_sha3.init(transmute(^_sha3.Context)(ctx))
 }
 
-hash_512 :: proc {
-	hash_stream_512,
-	hash_file_512,
-	hash_bytes_512,
-	hash_string_512,
-	hash_bytes_to_buffer_512,
-	hash_string_to_buffer_512,
+// update adds more data to the Context.
+update :: proc(ctx: ^Context, data: []byte) {
+	_sha3.update(transmute(^_sha3.Context)(ctx), data)
 }
 
-/*
-    Low level API
-*/
-
-Context :: _sha3.Sha3_Context
-
-init :: proc(ctx: ^Context) {
-	_sha3.init(ctx)
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
+	_sha3.final(transmute(^_sha3.Context)(ctx), hash, finalize_clone)
 }
 
-update :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(ctx, data)
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
-	_sha3.final(ctx, hash)
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	_sha3.reset(transmute(^_sha3.Context)(ctx))
 }

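The sha3 wrapper follows the same shape. A sketch of a rolling digest using the new finalize_clone parameter (the inputs are illustrative):

	ctx: sha3.Context
	sha3.init_256(&ctx)
	sha3.update(&ctx, []byte{'a', 'b'})

	running: [sha3.DIGEST_SIZE_256]byte
	sha3.final(&ctx, running[:], true) // finalizes a clone; ctx stays live

	sha3.update(&ctx, []byte{'c'})
	digest: [sha3.DIGEST_SIZE_256]byte
	sha3.final(&ctx, digest[:])        // digest == SHA3-256("abc"); ctx is reset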
+ 40 - 178
core/crypto/shake/shake.odin

@@ -1,3 +1,11 @@
+/*
+package shake implements the SHAKE XOF algorithm family.
+
+The SHA3 hash algorithm can be found in crypto/sha3.
+
+See:
+- https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf
+*/
 package shake
 
 /*
@@ -6,201 +14,55 @@ package shake
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Interface for the SHAKE hashing algorithm.
-    The SHA3 functionality can be found in package sha3.
-
-    TODO: This should provide an incremental squeeze interface, in addition
-    to the one-shot final call.
 */
 
-import "core:io"
-import "core:os"
-
 import "../_sha3"
 
-/*
-    High level API
-*/
-
-DIGEST_SIZE_128 :: 16
-DIGEST_SIZE_256 :: 32
-
-// hash_string_128 will hash the given input and return the
-// computed hash
-hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
-	return hash_bytes_128(transmute([]byte)(data))
-}
-
-// hash_bytes_128 will hash the given input and return the
-// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
-	hash: [DIGEST_SIZE_128]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_128
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer_128 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer_128 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_128
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream_128 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
-	hash: [DIGEST_SIZE_128]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_128
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
+// Context is a SHAKE128 or SHAKE256 instance.
+Context :: distinct _sha3.Context
 
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
-
-// hash_file_128 will read the file provided by the given handle
-// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
-	if !load_at_once {
-		return hash_stream_128(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_128(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE_128]byte{}, false
-}
-
-hash_128 :: proc {
-	hash_stream_128,
-	hash_file_128,
-	hash_bytes_128,
-	hash_string_128,
-	hash_bytes_to_buffer_128,
-	hash_string_to_buffer_128,
-}
-
-// hash_string_256 will hash the given input and return the
-// computed hash
-hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
-	return hash_bytes_256(transmute([]byte)(data))
-}
-
-// hash_bytes_256 will hash the given input and return the
-// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
+// init_128 initializes a Context for SHAKE128.
+init_128 :: proc(ctx: ^Context) {
+	ctx.mdlen = 128 / 8
+	_init(ctx)
 }
 
-// hash_string_to_buffer_256 will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+// init_256 initializes a Context for SHAKE256.
+init_256 :: proc(ctx: ^Context) {
+	ctx.mdlen = 256 / 8
+	_init(ctx)
 }
 
-// hash_bytes_to_buffer_256 will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
+@(private)
+_init :: proc(ctx: ^Context) {
+	_sha3.init(transmute(^_sha3.Context)(ctx))
 }
 
-// hash_stream_256 will read the stream in chunks and compute a
-// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
-	hash: [DIGEST_SIZE_256]byte
-	ctx: Context
-	ctx.mdlen = DIGEST_SIZE_256
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
+// write writes more data into the SHAKE instance.  This MUST not be called
+// after any reads have been done, and attempts to do so will panic.
+write :: proc(ctx: ^Context, data: []byte) {
+	_sha3.update(transmute(^_sha3.Context)(ctx), data)
 }
 
-// hash_file_256 will read the file provided by the given handle
-// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
-	if !load_at_once {
-		return hash_stream_256(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes_256(buf[:]), ok
-		}
+// read reads output from the SHAKE instance.  There is no practical upper
+// limit to the amount of data that can be read from SHAKE.  After read has
+// been called one or more times, further calls to write will panic.
+read :: proc(ctx: ^Context, dst: []byte) {
+	ctx_ := transmute(^_sha3.Context)(ctx)
+	if !ctx.is_finalized {
+		_sha3.shake_xof(ctx_)
 	}
-	return [DIGEST_SIZE_256]byte{}, false
-}
-
-hash_256 :: proc {
-	hash_stream_256,
-	hash_file_256,
-	hash_bytes_256,
-	hash_string_256,
-	hash_bytes_to_buffer_256,
-	hash_string_to_buffer_256,
-}
-
-/*
-    Low level API
-*/
-
-Context :: _sha3.Sha3_Context
 
-init :: proc(ctx: ^Context) {
-	_sha3.init(ctx)
+	_sha3.shake_out(ctx_, dst)
 }
 
-update :: proc(ctx: ^Context, data: []byte) {
-	_sha3.update(ctx, data)
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	_sha3.clone(transmute(^_sha3.Context)(ctx), transmute(^_sha3.Context)(other))
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
-	_sha3.shake_xof(ctx)
-	_sha3.shake_out(ctx, hash[:])
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	_sha3.reset(transmute(^_sha3.Context)(ctx))
 }

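The shake package drops the fixed-output final in favor of a proper XOF interface: absorb with write, then squeeze any amount with read. A sketch (the seed value is illustrative):

	seed := "a seed value"

	ctx: shake.Context
	shake.init_128(&ctx)
	shake.write(&ctx, transmute([]byte)seed) // absorb; must precede any read

	out: [64]byte
	shake.read(&ctx, out[:16]) // squeeze 16 bytes...
	shake.read(&ctx, out[16:]) // ...then 48 more; output length is unbounded
	shake.reset(&ctx)          // sanitize when done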
+ 47 - 98
core/crypto/sm3/sm3.odin

@@ -1,3 +1,9 @@
+/*
+package sm3 implements the SM3 hash algorithm.
+
+See:
+- https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+*/
 package sm3
 
 /*
@@ -6,102 +12,29 @@ package sm3
 
     List of contributors:
         zhibog, dotbmp:  Initial implementation.
-
-    Implementation of the SM3 hashing algorithm, as defined in <https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02>
 */
 
 import "core:encoding/endian"
-import "core:io"
 import "core:math/bits"
-import "core:os"
-
-/*
-    High level API
-*/
+import "core:mem"
 
+// DIGEST_SIZE is the SM3 digest size in bytes.
 DIGEST_SIZE :: 32
 
-// hash_string will hash the given input and return the
-// computed hash
-hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
-	return hash_bytes(transmute([]byte)(data))
-}
-
-// hash_bytes will hash the given input and return the
-// computed hash
-hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash[:])
-	return hash
-}
-
-// hash_string_to_buffer will hash the given input and assign the
-// computed hash to the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_string_to_buffer :: proc(data: string, hash: []byte) {
-	hash_bytes_to_buffer(transmute([]byte)(data), hash)
-}
-
-// hash_bytes_to_buffer will hash the given input and write the
-// computed hash into the second parameter.
-// It requires that the destination buffer is at least as big as the digest size
-hash_bytes_to_buffer :: proc(data, hash: []byte) {
-	ctx: Context
-	init(&ctx)
-	update(&ctx, data)
-	final(&ctx, hash)
-}
-
-// hash_stream will read the stream in chunks and compute a
-// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
-	hash: [DIGEST_SIZE]byte
-	ctx: Context
-	init(&ctx)
-
-	buf := make([]byte, 512)
-	defer delete(buf)
-
-	read := 1
-	for read > 0 {
-		read, _ = io.read(s, buf)
-		if read > 0 {
-			update(&ctx, buf[:read])
-		}
-	}
-	final(&ctx, hash[:])
-	return hash, true
-}
+// BLOCK_SIZE is the SM3 block size in bytes.
+BLOCK_SIZE :: 64
 
-// hash_file will read the file provided by the given handle
-// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
-	if !load_at_once {
-		return hash_stream(os.stream_from_handle(hd))
-	} else {
-		if buf, ok := os.read_entire_file(hd); ok {
-			return hash_bytes(buf[:]), ok
-		}
-	}
-	return [DIGEST_SIZE]byte{}, false
-}
+// Context is an SM3 instance.
+Context :: struct {
+	state:     [8]u32,
+	x:         [BLOCK_SIZE]byte,
+	bitlength: u64,
+	length:    u64,
 
-hash :: proc {
-	hash_stream,
-	hash_file,
-	hash_bytes,
-	hash_string,
-	hash_bytes_to_buffer,
-	hash_string_to_buffer,
+	is_initialized: bool,
 }
 
-/*
-    Low level API
-*/
-
+// init initializes a Context.
 init :: proc(ctx: ^Context) {
 	ctx.state[0] = IV[0]
 	ctx.state[1] = IV[1]
@@ -118,6 +51,7 @@ init :: proc(ctx: ^Context) {
 	ctx.is_initialized = true
 }
 
+// update adds more data to the Context.
 update :: proc(ctx: ^Context, data: []byte) {
 	assert(ctx.is_initialized)
 
@@ -143,13 +77,26 @@ update :: proc(ctx: ^Context, data: []byte) {
 	}
 }
 
-final :: proc(ctx: ^Context, hash: []byte) {
+// final finalizes the Context, writes the digest to hash, and calls
+// reset on the Context.
+//
+// Iff finalize_clone is set, final will work on a copy of the Context,
+// which is useful for calculating rolling digests.
+final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
 	assert(ctx.is_initialized)
 
 	if len(hash) < DIGEST_SIZE {
 		panic("crypto/sm3: invalid destination digest size")
 	}
 
+	ctx := ctx
+	if finalize_clone {
+		tmp_ctx: Context
+		clone(&tmp_ctx, ctx)
+		ctx = &tmp_ctx
+	}
+	defer(reset(ctx))
+
 	length := ctx.length
 
 	pad: [BLOCK_SIZE]byte
@@ -168,25 +115,27 @@ final :: proc(ctx: ^Context, hash: []byte) {
 	for i := 0; i < DIGEST_SIZE / 4; i += 1 {
 		endian.unchecked_put_u32be(hash[i * 4:], ctx.state[i])
 	}
+}
 
-	ctx.is_initialized = false
+// clone clones the Context other into ctx.
+clone :: proc(ctx, other: ^Context) {
+	ctx^ = other^
+}
+
+// reset sanitizes the Context.  The Context must be re-initialized to
+// be used again.
+reset :: proc(ctx: ^Context) {
+	if !ctx.is_initialized {
+		return
+	}
+
+	mem.zero_explicit(ctx, size_of(ctx^))
 }
 
 /*
     SM3 implementation
 */
 
-BLOCK_SIZE :: 64
-
-Context :: struct {
-	state:     [8]u32,
-	x:         [BLOCK_SIZE]byte,
-	bitlength: u64,
-	length:    u64,
-
-	is_initialized: bool,
-}
-
 @(private)
 IV := [8]u32 {
 	0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,

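Usage of sm3 mirrors sha2, minus the digest-size selection since SM3 has a single output length (the message is illustrative):

	msg := "hello"

	ctx: sm3.Context
	sm3.init(&ctx)
	sm3.update(&ctx, transmute([]byte)msg)

	digest: [sm3.DIGEST_SIZE]byte
	sm3.final(&ctx, digest[:]) // writes the digest and resets ctx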
+ 20 - 27
core/dynlib/lib.odin

@@ -1,8 +1,8 @@
 package dynlib
 
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:reflect"
-import "core:runtime"
+import "base:runtime"
 _ :: intrinsics
 _ :: reflect
 _ :: runtime
@@ -123,40 +123,34 @@ Returns:
 
 See doc.odin for an example.
 */
-initialize_symbols :: proc(symbol_table: ^$T, library_path: string, symbol_prefix := "", handle_field_name := "__handle") -> (count: int, ok: bool) where intrinsics.type_is_struct(T) {
+initialize_symbols :: proc(
+	symbol_table: ^$T, library_path: string,
+	symbol_prefix := "", handle_field_name := "__handle",
+) -> (count: int = -1, ok: bool = false) where intrinsics.type_is_struct(T) {
 	assert(symbol_table != nil)
-	handle: Library
 
-	if handle, ok = load_library(library_path); !ok {
-		return -1, false
-	}
-
-	// `symbol_table` must be a struct because of the where clause, so this can't fail.
-	ti := runtime.type_info_base(type_info_of(T))
-	s, _ := ti.variant.(runtime.Type_Info_Struct)
+	handle := load_library(library_path) or_return
 
 	// Buffer to concatenate the prefix + symbol name.
 	prefixed_symbol_buf: [2048]u8 = ---
 
-	sym_ptr: rawptr
-	for field_name, i in s.names {
+	count = 0
+	for field, i in reflect.struct_fields_zipped(T) {
 		// Calculate address of struct member
-		field_ptr := rawptr(uintptr(rawptr(symbol_table)) + uintptr(s.offsets[i]))
+		field_ptr := rawptr(uintptr(symbol_table) + field.offset)
 
 		// If we've come across the struct member for the handle, store it and continue scanning for other symbols.
-		if field_name == handle_field_name {
+		if field.name == handle_field_name {
 			// We appear to be hot reloading. Unload previous incarnation of the library.
 			if old_handle := (^Library)(field_ptr)^; old_handle != nil {
-				if ok = unload_library(old_handle); !ok {
-					return count, ok
-				}
+				unload_library(old_handle) or_return
 			}
 			(^Library)(field_ptr)^ = handle
 			continue
 		}
 
 		// We're not the library handle, so the field needs to be a pointer type, be it a procedure pointer or an exported global.
-		if !(reflect.is_procedure(s.types[i]) || reflect.is_pointer(s.types[i])) {
+		if !(reflect.is_procedure(field.type) || reflect.is_pointer(field.type)) {
 			continue
 		}
 
@@ -164,22 +158,21 @@ initialize_symbols :: proc(symbol_table: ^$T, library_path: string, symbol_prefi
 		prefixed_name: string
 
 		// Do we have a symbol override tag?
-		if override, tag_ok := reflect.struct_tag_lookup(reflect.Struct_Tag(s.tags[i]), "dynlib"); tag_ok {
-			prefixed_name = string(override)
+		if override, tag_ok := reflect.struct_tag_lookup(field.tag, "dynlib"); tag_ok {
+			prefixed_name = override
 		}
 
 		// No valid symbol override tag found, fall back to `<symbol_prefix>name`.
 		if len(prefixed_name) == 0 {
 			offset := copy(prefixed_symbol_buf[:], symbol_prefix)
-			copy(prefixed_symbol_buf[offset:], field_name)
-			prefixed_name = string(prefixed_symbol_buf[:len(symbol_prefix) + len(field_name)])
+			copy(prefixed_symbol_buf[offset:], field.name)
+			prefixed_name = string(prefixed_symbol_buf[:len(symbol_prefix) + len(field.name)])
 		}
 
 		// Assign procedure (or global) pointer if found.
-		if sym_ptr, ok = symbol_address(handle, prefixed_name); ok {
-			(^rawptr)(field_ptr)^ = sym_ptr
-			count += 1
-		}
+		sym_ptr := symbol_address(handle, prefixed_name) or_continue
+		(^rawptr)(field_ptr)^ = sym_ptr
+		count += 1
 	}
 	return count, count > 0
 }

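For context, the reworked initialize_symbols still walks a caller-supplied struct, resolving each pointer field by "<symbol_prefix>name" and stashing the library handle in the __handle field. Everything named in this sketch (library path, prefix, symbols) is hypothetical:

	import "core:dynlib"

	Api :: struct {
		add:      proc "c" (a, b: i32) -> i32, // looked up as "my_add"
		version:  ^i32,                        // exported global, "my_version"
		__handle: dynlib.Library,              // receives (and hot-reloads) the handle
	}

	api: Api
	count, ok := dynlib.initialize_symbols(&api, "my_library.so", "my_")
	// count is the number of symbols resolved; ok requires at least one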
+ 1 - 1
core/dynlib/lib_windows.odin

@@ -4,7 +4,7 @@ package dynlib
 
 import win32 "core:sys/windows"
 import "core:strings"
-import "core:runtime"
+import "base:runtime"
 import "core:reflect"
 
 _load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {

+ 1 - 1
core/encoding/endian/endian.odin

@@ -1,6 +1,6 @@
 package encoding_endian
 
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:math/bits"
 
 Byte_Order :: enum u8 {

+ 4 - 1
core/encoding/json/marshal.odin

@@ -2,7 +2,7 @@ package json
 
 import "core:mem"
 import "core:math/bits"
-import "core:runtime"
+import "base:runtime"
 import "core:strconv"
 import "core:strings"
 import "core:reflect"
@@ -228,6 +228,9 @@ marshal_to_writer :: proc(w: io.Writer, v: any, opt: ^Marshal_Options) -> (err:
 	case runtime.Type_Info_Matrix:
 		return .Unsupported_Type
 
+	case runtime.Type_Info_Bit_Field:
+		return .Unsupported_Type
+
 	case runtime.Type_Info_Array:
 		opt_write_start(w, opt, '[') or_return
 		for i in 0..<info.count {

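A minimal sketch of the effect of the new case (the bit_field type is illustrative, and the call assumes the usual marshal entry point):

	Flags :: bit_field u8 {
		a: u8 | 4,
		b: u8 | 4,
	}

	f: Flags
	_, err := json.marshal(f)
	// err reports .Unsupported_Type, per the new Type_Info_Bit_Field case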
+ 1 - 2
core/encoding/json/unmarshal.odin

@@ -5,7 +5,7 @@ import "core:math"
 import "core:reflect"
 import "core:strconv"
 import "core:strings"
-import "core:runtime"
+import "base:runtime"
 
 Unmarshal_Data_Error :: enum {
 	Invalid_Data,
@@ -492,7 +492,6 @@ unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unm
 			}
 		}
 
-		return nil
 	case:
 		return UNSUPPORTED_TYPE
 	}

+ 2 - 2
core/encoding/xml/xml_reader.odin

@@ -29,11 +29,11 @@ package xml
 
 import "core:bytes"
 import "core:encoding/entity"
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:mem"
 import "core:os"
 import "core:strings"
-import "core:runtime"
+import "base:runtime"
 
 likely :: intrinsics.expect
 

+ 116 - 54
core/fmt/fmt.odin

@@ -1,15 +1,15 @@
 package fmt
 
+import "base:intrinsics"
+import "base:runtime"
 import "core:math/bits"
 import "core:mem"
 import "core:io"
 import "core:reflect"
-import "core:runtime"
 import "core:strconv"
 import "core:strings"
 import "core:time"
 import "core:unicode/utf8"
-import "core:intrinsics"
 
 // Internal data structure that stores the required information for formatted printing
 Info :: struct {
@@ -253,18 +253,24 @@ bprintf :: proc(buf: []byte, fmt: string, args: ..any) -> string {
 // - args: A variadic list of arguments to be formatted
 // - loc: The location of the caller
 //
-// Returns: True if the condition is met, otherwise triggers a runtime assertion with a formatted message
-//
-assertf :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_location) -> bool {
+@(disabled=ODIN_DISABLE_ASSERT)
+assertf :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_location) {
 	if !condition {
-		p := context.assertion_failure_proc
-		if p == nil {
-			p = runtime.default_assertion_failure_proc
+		// NOTE(dragos): We are using the same trick as in builtin.assert
+		// to improve performance: moving the failure path into a @(cold)
+		// procedure keeps the CPU from executing it speculatively,
+		// making the check about an order of magnitude faster
+		@(cold)
+		internal :: proc(loc: runtime.Source_Code_Location, fmt: string, args: ..any) {
+			p := context.assertion_failure_proc
+			if p == nil {
+				p = runtime.default_assertion_failure_proc
+			}
+			message := tprintf(fmt, ..args)
+			p("Runtime assertion", message, loc)
 		}
-		message := tprintf(fmt, ..args)
-		p("Runtime assertion", message, loc)
+		internal(loc, fmt, ..args)
 	}
-	return condition
 }
 // Runtime panic with a formatted message
 //
@@ -948,24 +954,10 @@ _fmt_int :: proc(fi: ^Info, u: u64, base: int, is_signed: bool, bit_size: int, d
 	start := 0
 
 	flags: strconv.Int_Flags
-	if fi.hash && !fi.zero { flags |= {.Prefix} }
-	if fi.plus             { flags |= {.Plus}   }
+	if fi.hash { flags |= {.Prefix} }
+	if fi.plus { flags |= {.Plus}   }
 	s := strconv.append_bits(buf[start:], u, base, is_signed, bit_size, digits, flags)
 
-	if fi.hash && fi.zero && fi.indent == 0 {
-		c: byte = 0
-		switch base {
-		case 2:  c = 'b'
-		case 8:  c = 'o'
-		case 12: c = 'z'
-		case 16: c = 'x'
-		}
-		if c != 0 {
-			io.write_byte(fi.writer, '0', &fi.n)
-			io.write_byte(fi.writer, c, &fi.n)
-		}
-	}
-
 	prev_zero := fi.zero
 	defer fi.zero = prev_zero
 	fi.zero = false
@@ -1416,34 +1408,9 @@ fmt_soa_pointer :: proc(fi: ^Info, p: runtime.Raw_Soa_Pointer, verb: rune) {
 //
 // Returns: The string representation of the enum value and a boolean indicating success.
 //
+@(require_results)
 enum_value_to_string :: proc(val: any) -> (string, bool) {
-	v := val
-	v.id = runtime.typeid_base(v.id)
-	type_info := type_info_of(v.id)
-
-	#partial switch e in type_info.variant {
-	case: return "", false
-	case runtime.Type_Info_Enum:
-		Enum_Value :: runtime.Type_Info_Enum_Value
-
-		ev_, ok := reflect.as_i64(val)
-		ev := Enum_Value(ev_)
-
-		if ok {
-			if len(e.values) == 0 {
-				return "", true
-			} else {
-				for val, idx in e.values {
-					if val == ev {
-						return e.names[idx], true
-					}
-				}
-			}
-			return "", false
-		}
-	}
-
-	return "", false
+	return reflect.enum_name_from_value_any(val)
 }
 // Returns the enum value of a string representation.
 //
@@ -2206,6 +2173,8 @@ fmt_named :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Named)
 	#partial switch b in info.base.variant {
 	case runtime.Type_Info_Struct:
 		fmt_struct(fi, v, verb, b, info.name)
+	case runtime.Type_Info_Bit_Field:
+		fmt_bit_field(fi, v, verb, b, info.name)
 	case runtime.Type_Info_Bit_Set:
 		fmt_bit_set(fi, v, verb = verb)
 	case:
@@ -2316,6 +2285,96 @@ fmt_matrix :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Matrix
 		fmt_write_indent(fi)
 	}
 }
+
+fmt_bit_field :: proc(fi: ^Info, v: any, verb: rune, info: runtime.Type_Info_Bit_Field, type_name: string) {
+	read_bits :: proc(ptr: [^]byte, offset, size: uintptr) -> (res: u64) {
+		for i in 0..<size {
+			j := i+offset
+			B := ptr[j/8]
+			k := j&7
+			if B & (u8(1)<<k) != 0 {
+				res |= u64(1)<<u64(i)
+			}
+		}
+		return
+	}
+
+	handle_bit_field_tag :: proc(data: rawptr, info: reflect.Type_Info_Bit_Field, idx: int, verb: ^rune) -> (do_continue: bool) {
+		tag := info.tags[idx]
+		if vt, ok := reflect.struct_tag_lookup(reflect.Struct_Tag(tag), "fmt"); ok {
+			value := strings.trim_space(string(vt))
+			switch value {
+			case "": return false
+			case "-": return true
+			}
+			r, w := utf8.decode_rune_in_string(value)
+			value = value[w:]
+			if value == "" || value[0] == ',' {
+				verb^ = r
+			}
+		}
+		return false
+	}
+
+	io.write_string(fi.writer, type_name if len(type_name) != 0 else "bit_field", &fi.n)
+	io.write_string(fi.writer, "{", &fi.n)
+
+	hash   := fi.hash;   defer fi.hash = hash
+	indent := fi.indent; defer fi.indent -= 1
+	do_trailing_comma := hash
+
+	fi.indent += 1
+
+	if hash {
+		io.write_byte(fi.writer, '\n', &fi.n)
+	}
+	defer {
+		if hash {
+			for _ in 0..<indent { io.write_byte(fi.writer, '\t', &fi.n) }
+		}
+		io.write_byte(fi.writer, '}', &fi.n)
+	}
+
+
+	field_count := -1
+	for name, i in info.names {
+		field_verb := verb
+		if handle_bit_field_tag(v.data, info, i, &field_verb) {
+			continue
+		}
+
+		field_count += 1
+
+		if !do_trailing_comma && field_count > 0 {
+			io.write_string(fi.writer, ", ", &fi.n)
+		}
+		if hash {
+			fmt_write_indent(fi)
+		}
+
+		io.write_string(fi.writer, name, &fi.n)
+		io.write_string(fi.writer, " = ", &fi.n)
+
+		bit_offset := info.bit_offsets[i]
+		bit_size := info.bit_sizes[i]
+
+		value := read_bits(([^]byte)(v.data), bit_offset, bit_size)
+		type := info.types[i]
+
+		if !reflect.is_unsigned(runtime.type_info_core(type)) {
+			// Sign extension: XOR with the sign-bit mask, then subtract it
+			m := u64(1<<(bit_size-1))
+			value = (value ~ m) - m
+		}
+
+		fmt_value(fi, any{&value, type.id}, field_verb)
+		if do_trailing_comma { io.write_string(fi.writer, ",\n", &fi.n) }
+
+	}
+}
+
+
+
 // Formats a value based on its type and formatting verb
 //
 // Inputs:
@@ -2644,6 +2703,9 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
 
 	case runtime.Type_Info_Matrix:
 		fmt_matrix(fi, v, verb, info)
+
+	case runtime.Type_Info_Bit_Field:
+		fmt_bit_field(fi, v, verb, info, "")
 	}
 }
 // Formats a complex number based on the given formatting verb

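With fmt_bit_field wired into both fmt_named and fmt_value, bit_field values now print like structs, with signed fields sign-extended as above. A sketch of the expected behaviour (the type is illustrative):

	Packet :: bit_field u16 {
		kind:  u8 | 3,
		flags: u8 | 5,
		len:   i8 | 8,
	}

	p: Packet
	p.kind  = 5
	p.flags = 31
	p.len   = -2
	fmt.println(p) // Packet{kind = 5, flags = 31, len = -2}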
+ 1 - 1
core/fmt/fmt_os.odin

@@ -2,7 +2,7 @@
 //+build !js
 package fmt
 
-import "core:runtime"
+import "base:runtime"
 import "core:os"
 import "core:io"
 import "core:bufio"

+ 1 - 1
core/hash/crc32.odin

@@ -1,6 +1,6 @@
 package hash
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 @(optimization_mode="speed")
 crc32 :: proc(data: []byte, seed := u32(0)) -> u32 #no_bounds_check {

+ 1 - 1
core/hash/hash.odin

@@ -1,7 +1,7 @@
 package hash
 
 import "core:mem"
-import "core:intrinsics"
+import "base:intrinsics"
 
 @(optimization_mode="speed")
 adler32 :: proc(data: []byte, seed := u32(1)) -> u32 #no_bounds_check {

+ 2 - 2
core/hash/xxhash/common.odin

@@ -9,8 +9,8 @@
 */
 package xxhash
 
-import "core:intrinsics"
-import "core:runtime"
+import "base:intrinsics"
+import "base:runtime"
 
 mem_copy  :: runtime.mem_copy
 byte_swap :: intrinsics.byte_swap

+ 1 - 1
core/hash/xxhash/streaming.odin

@@ -10,7 +10,7 @@
 package xxhash
 
 import "core:mem"
-import "core:intrinsics"
+import "base:intrinsics"
 
 /*
 	===   XXH3 128-bit streaming   ===

+ 1 - 1
core/hash/xxhash/xxhash_3.odin

@@ -9,7 +9,7 @@
 */
 package xxhash
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 /*
 *************************************************************************

+ 1 - 1
core/hash/xxhash/xxhash_32.odin

@@ -9,7 +9,7 @@
 */
 package xxhash
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 /*
 	32-bit hash functions

+ 1 - 1
core/hash/xxhash/xxhash_64.odin

@@ -9,7 +9,7 @@
 */
 package xxhash
 
-import "core:intrinsics"
+import "base:intrinsics"
 
 /*
 	64-bit hash functions

+ 1 - 1
core/image/common.odin

@@ -13,7 +13,7 @@ package image
 import "core:bytes"
 import "core:mem"
 import "core:compress"
-import "core:runtime"
+import "base:runtime"
 
 /*
 	67_108_864 pixels max by default.

+ 1 - 1
core/image/netpbm/netpbm.odin

@@ -8,7 +8,7 @@ import "core:mem"
 import "core:strconv"
 import "core:strings"
 import "core:unicode"
-import "core:runtime"
+import "base:runtime"
 
 Image        :: image.Image
 Format       :: image.Netpbm_Format

+ 1 - 1
core/image/png/helpers.odin

@@ -16,7 +16,7 @@ import coretime "core:time"
 import "core:strings"
 import "core:bytes"
 import "core:mem"
-import "core:runtime"
+import "base:runtime"
 
 /*
 	Cleanup of image-specific data.

+ 2 - 2
core/image/png/png.odin

@@ -22,8 +22,8 @@ import "core:hash"
 import "core:bytes"
 import "core:io"
 import "core:mem"
-import "core:intrinsics"
-import "core:runtime"
+import "base:intrinsics"
+import "base:runtime"
 
 // Limit chunk sizes.
 // By default: IDAT = 8k x 8k x 16-bits + 8k filter bytes.

+ 1 - 1
core/io/io.odin

@@ -3,7 +3,7 @@
 // operations into an abstracted stream interface.
 package io
 
-import "core:intrinsics"
+import "base:intrinsics"
 import "core:unicode/utf8"
 
 // Seek whence values

+ 1 - 0
core/log/file_console_logger.odin

@@ -1,3 +1,4 @@
+//+build !freestanding
 package log
 
 import "core:fmt"

+ 37 - 1
core/log/log.odin

@@ -1,6 +1,6 @@
 package log
 
-import "core:runtime"
+import "base:runtime"
 import "core:fmt"
 
 
@@ -116,6 +116,42 @@ panicf :: proc(fmt_str: string, args: ..any, location := #caller_location) -> !
 	runtime.panic("log.panicf", location)
 }
 
+@(disabled=ODIN_DISABLE_ASSERT)
+assert :: proc(condition: bool, message := "", loc := #caller_location) {
+	if !condition {
+		@(cold)
+		internal :: proc(message: string, loc: runtime.Source_Code_Location) {
+			p := context.assertion_failure_proc
+			if p == nil {
+				p = runtime.default_assertion_failure_proc
+			}
+			log(.Fatal, message, location=loc)
+			p("runtime assertion", message, loc)
+		}
+		internal(message, loc)
+	}
+}
+
+@(disabled=ODIN_DISABLE_ASSERT)
+assertf :: proc(condition: bool, fmt_str: string, args: ..any, loc := #caller_location) {
+	if !condition {
+		// NOTE(dragos): We are using the same trick as in builtin.assert
+		// to improve performance: moving the failure path into a @(cold)
+		// procedure keeps the CPU from executing it speculatively,
+		// making the check about an order of magnitude faster
+		@(cold)
+		internal :: proc(loc: runtime.Source_Code_Location, fmt_str: string, args: ..any) {
+			p := context.assertion_failure_proc
+			if p == nil {
+				p = runtime.default_assertion_failure_proc
+			}
+			message := fmt.tprintf(fmt_str, ..args)
+			log(.Fatal, message, location=loc)
+			p("Runtime assertion", message, loc)
+		}
+		internal(loc, fmt_str, ..args)
+	}
+}
 
 
 

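A quick sketch of the new logging asserts (the logger setup and the path variable are illustrative); on failure the message is logged at .Fatal before the assertion failure procedure fires:

	context.logger = log.create_console_logger()
	log.assertf(len(path) > 0, "expected a non-empty path, got %q", path)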
+ 1 - 1
core/log/log_allocator.odin

@@ -1,6 +1,6 @@
 package log
 
-import "core:runtime"
+import "base:runtime"
 import "core:fmt"
 
 Log_Allocator_Format :: enum {

Some files were not shown because too many files changed in this diff