Merge branch 'master' into fix/freebsd-syscall

gingerBill 3 years ago
parent commit 02a8bba02e
100 changed files with 7263 additions and 2348 deletions
  1.  +1 -0      .github/FUNDING.yml
  2.  +64 -14    .github/workflows/ci.yml
  3.  +7 -7      .github/workflows/nightly.yml
  4.  +7 -0      .gitignore
  5.  +1 -1      LICENSE
  6.  +6 -45     Makefile
  7.  +10 -8     README.md
  8.  +29 -0     bin/nasm/windows/LICENSE
  9.  BIN        bin/nasm/windows/nasm.exe
  10. BIN        bin/nasm/windows/ndisasm.exe
  11. +1 -1      build.bat
  12. +150 -0    build_odin.sh
  13. +3 -6      core/bufio/scanner.odin
  14. +82 -82    core/builtin/builtin.odin
  15. +8 -0      core/bytes/buffer.odin
  16. +58 -39    core/bytes/bytes.odin
  17. +6 -4      core/c/c.odin
  18. +3 -3      core/c/frontend/preprocessor/preprocess.odin
  19. +3 -1      core/c/libc/complex.odin
  20. +3 -1      core/c/libc/ctype.odin
  21. +32 -3     core/c/libc/errno.odin
  22. +13 -11    core/c/libc/math.odin
  23. +4 -3      core/c/libc/setjmp.odin
  24. +18 -3     core/c/libc/signal.odin
  25. +111 -146  core/c/libc/stdatomic.odin
  26. +88 -6     core/c/libc/stdio.odin
  27. +22 -4     core/c/libc/stdlib.odin
  28. +3 -1      core/c/libc/string.odin
  29. +9 -4      core/c/libc/threads.odin
  30. +13 -6     core/c/libc/time.odin
  31. +2 -0      core/c/libc/types.odin
  32. +3 -1      core/c/libc/uchar.odin
  33. +3 -1      core/c/libc/wchar.odin
  34. +18 -4     core/c/libc/wctype.odin
  35. +16 -6     core/compress/common.odin
  36. +1 -1      core/compress/gzip/example.odin
  37. +8 -7      core/compress/gzip/gzip.odin
  38. +148 -0    core/compress/shoco/model.odin
  39. +318 -0    core/compress/shoco/shoco.odin
  40. +43 -40    core/compress/zlib/zlib.odin
  41. +0 -216    core/container/array.odin
  42. +266 -0    core/container/bit_array/bit_array.odin
  43. +53 -0     core/container/bit_array/doc.odin
  44. +0 -80     core/container/bloom_filter.odin
  45. +173 -0    core/container/intrusive/list/intrusive_list.odin
  46. +201 -0    core/container/lru/lru_cache.odin
  47. +0 -377    core/container/map.odin
  48. +0 -121    core/container/priority_queue.odin
  49. +143 -0    core/container/priority_queue/priority_queue.odin
  50. +0 -175    core/container/queue.odin
  51. +219 -0    core/container/queue/queue.odin
  52. +0 -74     core/container/ring.odin
  53. +0 -240    core/container/set.odin
  54. +0 -95     core/container/small_array.odin
  55. +117 -0    core/container/small_array/small_array.odin
  56. +98 -0     core/container/topological_sort/topological_sort.odin
  57. +7 -1      core/crypto/README.md
  58. +35 -0     core/crypto/_fiat/README.md
  59. +24 -0     core/crypto/_fiat/fiat.odin
  60. +138 -0    core/crypto/_fiat/field_curve25519/field.odin
  61. +616 -0    core/crypto/_fiat/field_curve25519/field51.odin
  62. +66 -0     core/crypto/_fiat/field_poly1305/field.odin
  63. +356 -0    core/crypto/_fiat/field_poly1305/field4344.odin
  64. +2 -2      core/crypto/_sha3/_sha3.odin
  65. +117 -28   core/crypto/blake/blake.odin
  66. +33 -7     core/crypto/blake2b/blake2b.odin
  67. +33 -7     core/crypto/blake2s/blake2s.odin
  68. +581 -0    core/crypto/chacha20/chacha20.odin
  69. +146 -0    core/crypto/chacha20poly1305/chacha20poly1305.odin
  70. +52 -0     core/crypto/crypto.odin
  71. +29 -7     core/crypto/gost/gost.odin
  72. +117 -28   core/crypto/groestl/groestl.odin
  73. +400 -95   core/crypto/haval/haval.odin
  74. +117 -28   core/crypto/jh/jh.odin
  75. +130 -37   core/crypto/keccak/keccak.odin
  76. +43 -21    core/crypto/md2/md2.odin
  77. +31 -9     core/crypto/md4/md4.odin
  78. +31 -9     core/crypto/md5/md5.odin
  79. +163 -0    core/crypto/poly1305/poly1305.odin
  80. +7 -0      core/crypto/rand_generic.odin
  81. +37 -0     core/crypto/rand_linux.odin
  82. +12 -0     core/crypto/rand_openbsd.odin
  83. +23 -0     core/crypto/rand_windows.odin
  84. +113 -28   core/crypto/ripemd/ripemd.odin
  85. +30 -7     core/crypto/sha1/sha1.odin
  86. +121 -30   core/crypto/sha2/sha2.odin
  87. +125 -36   core/crypto/sha3/sha3.odin
  88. +66 -19    core/crypto/shake/shake.odin
  89. +335 -0    core/crypto/siphash/siphash.odin
  90. +33 -10    core/crypto/sm3/sm3.odin
  91. +58 -14    core/crypto/streebog/streebog.odin
  92. +88 -21    core/crypto/tiger/tiger.odin
  93. +88 -21    core/crypto/tiger2/tiger2.odin
  94. +29 -7     core/crypto/whirlpool/whirlpool.odin
  95. +126 -0    core/crypto/x25519/x25519.odin
  96. +12 -0     core/dynlib/lib.odin
  97. +15 -14    core/dynlib/lib_unix.odin
  98. +5 -4      core/dynlib/lib_windows.odin
  99. +65 -21    core/encoding/csv/reader.odin
  100. +23 -0    core/encoding/endian/doc.odin

+ 1 - 0
.github/FUNDING.yml

@@ -1,3 +1,4 @@
 # These are supported funding model platforms
 
+github: odin-lang
 patreon: gingerbill

+ 64 - 14
.github/workflows/ci.yml

@@ -1,5 +1,5 @@
 name: CI
-on: [push, pull_request]
+on: [push, pull_request, workflow_dispatch]
 
 jobs:
   build_linux:
@@ -7,9 +7,9 @@ jobs:
     steps:
       - uses: actions/checkout@v1
       - name: Download LLVM, botan
-        run: sudo apt-get install llvm-11 clang-11 llvm libbotan-2-dev botan
+        run: sudo apt-get install llvm-11 clang-11 libbotan-2-dev botan
       - name: build odin
-        run: make release
+        run: ./build_odin.sh release
       - name: Odin version
         run: ./odin version
         timeout-minutes: 1
@@ -17,13 +17,16 @@ jobs:
         run: ./odin report
         timeout-minutes: 1
       - name: Odin check
-        run: ./odin check examples/demo/demo.odin -vet
+        run: ./odin check examples/demo -vet
         timeout-minutes: 10
       - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
         timeout-minutes: 10
       - name: Odin run -debug
-        run: ./odin run examples/demo/demo.odin -debug
+        run: ./odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        run: ./odin check examples/all -strict-style
         timeout-minutes: 10
       - name: Core library tests
         run: |
@@ -35,6 +38,20 @@ jobs:
           cd tests/vendor
           make
         timeout-minutes: 10
+      - name: Odin issues tests
+        run: |
+          cd tests/issues
+          ./run.sh
+        timeout-minutes: 10
+      - name: Odin check examples/all for Linux i386
+        run: ./odin check examples/all -vet -strict-style -target:linux_i386
+        timeout-minutes: 10
+      - name: Odin check examples/all for FreeBSD amd64
+        run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
+        timeout-minutes: 10
+      - name: Odin check examples/all for OpenBSD amd64
+        run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
+        timeout-minutes: 10
   build_macOS:
     runs-on: macos-latest
     steps:
@@ -46,7 +63,7 @@ jobs:
           TMP_PATH=$(xcrun --show-sdk-path)/user/include
           echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
       - name: build odin
-        run: make release
+        run: ./build_odin.sh release
       - name: Odin version
         run: ./odin version
         timeout-minutes: 1
@@ -54,13 +71,16 @@ jobs:
         run: ./odin report
         timeout-minutes: 1
       - name: Odin check
-        run: ./odin check examples/demo/demo.odin -vet
+        run: ./odin check examples/demo -vet
         timeout-minutes: 10
       - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
         timeout-minutes: 10
       - name: Odin run -debug
-        run: ./odin run examples/demo/demo.odin -debug
+        run: ./odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        run: ./odin check examples/all -strict-style
         timeout-minutes: 10
       - name: Core library tests
         run: |
@@ -72,8 +92,19 @@ jobs:
           cd tests/vendor
           make
         timeout-minutes: 10
+      - name: Odin issues tests
+        run: |
+          cd tests/issues
+          ./run.sh
+        timeout-minutes: 10
+      - name: Odin check examples/all for Darwin arm64
+        run: ./odin check examples/all -vet -strict-style -target:darwin_arm64
+        timeout-minutes: 10
+      - name: Odin check examples/all for Linux arm64
+        run: ./odin check examples/all -vet -strict-style -target:linux_arm64
+        timeout-minutes: 10
   build_windows:
-    runs-on: windows-latest
+    runs-on: windows-2019
     steps:
       - uses: actions/checkout@v1
       - name: build Odin
@@ -91,19 +122,25 @@ jobs:
         shell: cmd
         run: |
           call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin check examples/demo/demo.odin -vet
+          odin check examples/demo -vet
         timeout-minutes: 10
       - name: Odin run
         shell: cmd
         run: |
           call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin
+          odin run examples/demo
         timeout-minutes: 10
       - name: Odin run -debug
         shell: cmd
         run: |
           call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin -debug
+          odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          odin check examples/all -strict-style
         timeout-minutes: 10
       - name: Core library tests
         shell: cmd
@@ -126,3 +163,16 @@ jobs:
           cd tests\core\math\big
           call build.bat
         timeout-minutes: 10
+      - name: Odin issues tests
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          cd tests\issues
+          call run.bat
+        timeout-minutes: 10
+      - name: Odin check examples/all for Windows 32bits
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          odin check examples/all -strict-style -target:windows_i386
+        timeout-minutes: 10

+ 7 - 7
.github/workflows/nightly.yml

@@ -7,7 +7,7 @@ on:
 
 jobs:
   build_windows:
-    runs-on: windows-latest
+    runs-on: windows-2019
     steps:
       - uses: actions/checkout@v1
       - name: build Odin
@@ -19,7 +19,7 @@ jobs:
         shell: cmd
         run: |
           call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin
+          odin run examples/demo
       - name: Copy artifacts
         run: |
           rm bin/llvm/windows/LLVM-C.lib
@@ -41,11 +41,11 @@ jobs:
     steps:
       - uses: actions/checkout@v1
       - name: (Linux) Download LLVM
-        run: sudo apt-get install llvm-11 clang-11 llvm
+        run: sudo apt-get install llvm-11 clang-11
      - name: build odin
        run: make nightly
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
      - name: Copy artifacts
        run: |
          mkdir dist
@@ -72,7 +72,7 @@ jobs:
      - name: build odin
        run: make nightly
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
      - name: Copy artifacts
        run: |
          mkdir dist
@@ -129,7 +129,7 @@ jobs:
        run: |
          echo Authorizing B2 account
          b2 authorize-account "$APPID" "$APPKEY"
-          
+
          echo Uploading artifcates to B2
          chmod +x ./ci/upload_create_nightly.sh
          ./ci/upload_create_nightly.sh "$BUCKET" windows-amd64 windows_artifacts/
@@ -141,7 +141,7 @@ jobs:
 
          echo Creating nightly.json
          python3 ci/create_nightly_json.py "$BUCKET" > nightly.json
-          
+
          echo Uploading nightly.json
          b2 upload-file "$BUCKET" nightly.json nightly.json
 

+ 7 - 0
.gitignore

@@ -7,6 +7,9 @@
 # User-specific files (MonoDevelop/Xamarin Studio)
 *.userprefs
 
+# For macOS
+.DS_Store
+
 # Build results
 [Dd]ebug/
 [Dd]ebugPublic/
@@ -266,6 +269,8 @@ bin/
 # - Linux/MacOS
 odin
 odin.dSYM
+*.bin
+demo.bin
 
 # shared collection
 shared/
@@ -276,3 +281,5 @@ shared/
 *.ll
 
 *.sublime-workspace
+examples/bug/
+build.sh

+ 1 - 1
LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2016-2021 Ginger Bill. All rights reserved.
+Copyright (c) 2016-2022 Ginger Bill. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

+ 6 - 45
Makefile

@@ -1,58 +1,19 @@
-GIT_SHA=$(shell git rev-parse --short HEAD)
-DISABLED_WARNINGS=-Wno-switch -Wno-macro-redefined -Wno-unused-value
-LDFLAGS=-pthread -ldl -lm -lstdc++
-CFLAGS=-std=c++14 -DGIT_SHA=\"$(GIT_SHA)\"
-CFLAGS:=$(CFLAGS) -DODIN_VERSION_RAW=\"dev-$(shell date +"%Y-%m")\"
-CC=clang
-
-OS=$(shell uname)
-
-ifeq ($(OS), Darwin)
-    LLVM_CONFIG=llvm-config
-    ifneq ($(shell llvm-config --version | grep '^11\.'),)
-        LLVM_CONFIG=llvm-config
-    else
-        $(error "Requirement: llvm-config must be version 11")
-    endif
-
-    LDFLAGS:=$(LDFLAGS) -liconv
-    CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
-    LDFLAGS:=$(LDFLAGS) -lLLVM-C
-endif
-ifeq ($(OS), Linux)
-    LLVM_CONFIG=llvm-config-11
-    ifneq ($(shell which llvm-config-11 2>/dev/null),)
-        LLVM_CONFIG=llvm-config-11
-    else ifneq ($(shell which llvm-config-11-64 2>/dev/null),)
-        LLVM_CONFIG=llvm-config-11-64
-    else
-        ifneq ($(shell llvm-config --version | grep '^11\.'),)
-            LLVM_CONFIG=llvm-config
-        else
-            $(error "Requirement: llvm-config must be version 11")
-        endif
-    endif
-
-    CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
-    LDFLAGS:=$(LDFLAGS) $(shell $(LLVM_CONFIG) --libs core native --system-libs)
-endif
-
-all: debug demo
+all: debug
 
 demo:
-	./odin run examples/demo/demo.odin
+	./odin run examples/demo/demo.odin -file
 
 report:
 	./odin report
 
 debug:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -g $(LDFLAGS) -o odin
+	./build_odin.sh debug
 
 release:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -O3 $(LDFLAGS) -o odin
+	./build_odin.sh release
 
 release_native:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -O3 -march=native $(LDFLAGS) -o odin
+	./build_odin.sh release-native
 
 nightly:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -DNIGHTLY -O3 $(LDFLAGS) -o odin
+	./build_odin.sh nightly

+ 10 - 8
README.md

@@ -11,7 +11,7 @@
         <img src="https://img.shields.io/badge/platforms-Windows%20|%20Linux%20|%20macOS-green.svg">
     </a>
     <br>
-    <a href="https://discord.gg/hnwN2Rj">
+    <a href="https://discord.gg/odinlang">
         <img src="https://img.shields.io/discord/568138951836172421?logo=discord">
     </a>
     <a href="https://github.com/odin-lang/odin/actions">
@@ -58,6 +58,10 @@ main :: proc() {
 
 Instructions for downloading and installing the Odin compiler and libraries.
 
+#### [Nightly Builds](https://odin-lang.org/docs/nightly/)
+
+Get the latest nightly builds of Odin.
+
 ### Learning Odin
 
 #### [Overview of Odin](https://odin-lang.org/docs/overview)
@@ -68,6 +72,10 @@ An overview of the Odin programming language.
 
 Answers to common questions about Odin.
 
+#### [Packages](https://pkg.odin-lang.org/)
+
+Documentation for all the official packages part of the [core](https://pkg.odin-lang.org/core/) and [vendor](https://pkg.odin-lang.org/vendor/) library collections.
+
 #### [The Odin Wiki](https://github.com/odin-lang/Odin/wiki)
 
 A wiki maintained by the Odin community.
@@ -76,15 +84,9 @@ A wiki maintained by the Odin community.
 
 Get live support and talk with other odiners on the Odin Discord.
 
-### References
-
-#### [Language Specification](https://odin-lang.org/docs/spec/)
-
-The official Odin Language specification.
-
 ### Articles
 
-#### [The Odin Blog](https://odin-lang.org/blog)
+#### [The Odin Blog](https://odin-lang.org/news/)
 
 The official blog of the Odin programming language, featuring announcements, news, and in-depth articles by the Odin team and guests.
 

+ 29 - 0
bin/nasm/windows/LICENSE

@@ -0,0 +1,29 @@
+NASM is now licensed under the 2-clause BSD license, also known as the
+simplified BSD license.
+
+    Copyright 1996-2010 the NASM Authors - All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following
+    conditions are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+      
+      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+      CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+      INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+      MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+      DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+      CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+      SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+      NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+      CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+      OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+      EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

BIN
bin/nasm/windows/nasm.exe


BIN
bin/nasm/windows/ndisasm.exe


+ 1 - 1
build.bat

@@ -58,7 +58,7 @@ set libs= ^
 set linker_flags= -incremental:no -opt:ref -subsystem:console
 
 if %release_mode% EQU 0 ( rem Debug
-	set linker_flags=%linker_flags% -debug
+	set linker_flags=%linker_flags% -debug /NATVIS:src\odin_compiler.natvis
 ) else ( rem Release
 	set linker_flags=%linker_flags% -debug
 )

+ 150 - 0
build_odin.sh

@@ -0,0 +1,150 @@
+#!/bin/bash
+set -eu
+
+GIT_SHA=$(git rev-parse --short HEAD)
+DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
+LDFLAGS="-pthread -lm -lstdc++"
+CFLAGS="-std=c++14 -DGIT_SHA=\"$GIT_SHA\""
+CFLAGS="$CFLAGS -DODIN_VERSION_RAW=\"dev-$(date +"%Y-%m")\""
+CC=clang
+OS=$(uname)
+
+panic() {
+	printf "%s\n" "$1"
+	exit 1
+}
+
+version() { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
+
+config_darwin() {
+	ARCH=$(uname -m)
+	LLVM_CONFIG=llvm-config
+
+	# allow for arm only llvm's with version 13
+	if [ ARCH == arm64 ]; then
+		MIN_LLVM_VERSION=("13.0.0")
+	else
+		# allow for x86 / amd64 all llvm versions begining from 11
+		MIN_LLVM_VERSION=("11.1.0")
+	fi
+
+	if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
+		if [ ARCH == arm64 ]; then
+			panic "Requirement: llvm-config must be base version 13 for arm64"
+		else
+			panic "Requirement: llvm-config must be base version greater than 11 for amd64/x86"
+		fi
+	fi
+
+	LDFLAGS="$LDFLAGS -liconv -ldl"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS -lLLVM-C"
+}
+
+config_freebsd() {
+	LLVM_CONFIG=/usr/local/bin/llvm-config11
+
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+config_openbsd() {
+	LLVM_CONFIG=/usr/local/bin/llvm-config
+
+	LDFLAGS="$LDFLAGS -liconv"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+config_linux() {
+	if which llvm-config > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config
+	elif which llvm-config-11 > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config-11
+	elif which llvm-config-11-64 > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config-11-64
+	else
+		panic "Unable to find LLVM-config"
+	fi
+
+	MIN_LLVM_VERSION=("11.0.0")
+	if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
+		echo "Tried to use " $(which $LLVM_CONFIG) "version" $($LLVM_CONFIG --version)
+		panic "Requirement: llvm-config must be base version greater than 11"
+	fi
+
+	LDFLAGS="$LDFLAGS -ldl"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+build_odin() {
+	case $1 in
+	debug)
+		EXTRAFLAGS="-g"
+		;;
+	release)
+		EXTRAFLAGS="-O3"
+		;;
+	release-native)
+		EXTRAFLAGS="-O3 -march=native"
+		;;
+	nightly)
+		EXTRAFLAGS="-DNIGHTLY -O3"
+		;;
+	*)
+		panic "Build mode unsupported!"
+	esac
+
+	set -x
+	$CC src/main.cpp src/libtommath.cpp $DISABLED_WARNINGS $CFLAGS $EXTRAFLAGS $LDFLAGS -o odin
+	set +x
+}
+
+run_demo() {
+	./odin run examples/demo/demo.odin -file
+}
+
+case $OS in
+Linux)
+	config_linux
+	;;
+Darwin)
+	config_darwin
+	;;
+OpenBSD)
+	config_openbsd
+	;;
+FreeBSD)
+	config_freebsd
+	;;
+*)
+	panic "Platform unsupported!"
+esac
+
+if [[ $# -eq 0 ]]; then
+	build_odin debug
+	run_demo
+	exit 0
+fi
+
+if [[ $# -eq 1 ]]; then
+	case $1 in
+	report)
+		if [[ ! -f "./odin" ]]; then
+			build_odin debug
+		fi
+
+		./odin report
+		exit 0
+		;;
+	*)
+		build_odin $1
+		;;
+	esac
+
+	run_demo
+	exit 0
+else
+	panic "Too many arguments!"
+fi

+ 3 - 6
core/bufio/scanner.odin

@@ -8,6 +8,7 @@ import "core:intrinsics"
 
 // Extra errors returns by scanning procedures
 Scanner_Extra_Error :: enum i32 {
+	None,
 	Negative_Advance,
 	Advanced_Too_Far,
 	Bad_Read_Count,
@@ -15,7 +16,7 @@ Scanner_Extra_Error :: enum i32 {
 	Too_Short,
 }
 
-Scanner_Error :: union {
+Scanner_Error :: union #shared_nil {
 	io.Error,
 	Scanner_Extra_Error,
 }
@@ -68,7 +69,7 @@ scanner_destroy :: proc(s: ^Scanner) {
 // Returns the first non-EOF error that was encounted by the scanner
 scanner_error :: proc(s: ^Scanner) -> Scanner_Error {
 	switch s._err {
-	case .EOF, .None:
+	case .EOF, nil:
 		return nil
 	}
 	return s._err
@@ -93,10 +94,6 @@ scanner_text :: proc(s: ^Scanner) -> string {
 // scanner_scan advances the scanner
 scanner_scan :: proc(s: ^Scanner) -> bool {
 	set_err :: proc(s: ^Scanner, err: Scanner_Error) {
-		err := err
-		if err == .None {
-			err = nil
-		}
 		switch s._err {
 		case nil, .EOF:
 			s._err = err
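
The switch to `union #shared_nil` gives every variant a common nil, which is what lets the `case .EOF, nil` comparison above work. A minimal sketch of a call site, not part of this commit (`read_lines` is a hypothetical helper):

package example

import "core:bufio"
import "core:fmt"

// Hypothetical caller: with `union #shared_nil`, the io.Error and
// Scanner_Extra_Error variants share one nil, so a single comparison
// covers every error source.
read_lines :: proc(s: ^bufio.Scanner) {
	for bufio.scanner_scan(s) {
		fmt.println(bufio.scanner_text(s))
	}
	if err := bufio.scanner_error(s); err != nil {
		fmt.eprintln("scan failed:", err)
	}
}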

+ 82 - 82
core/builtin/builtin.odin

@@ -1,90 +1,90 @@
 // This is purely for documentation
 package builtin
 
-nil   :: nil;
-false :: 0!==0;
-true  :: 0==0;
-
-ODIN_OS      :: ODIN_OS;
-ODIN_ARCH    :: ODIN_ARCH;
-ODIN_ENDIAN  :: ODIN_ENDIAN;
-ODIN_VENDOR  :: ODIN_VENDOR;
-ODIN_VERSION :: ODIN_VERSION;
-ODIN_ROOT    :: ODIN_ROOT;
-ODIN_DEBUG   :: ODIN_DEBUG;
-
-byte :: u8; // alias
-
-bool          :: bool;
-b8            :: b8;
-b16           :: b16;
-b32           :: b32;
-b64           :: b64;
-
-i8            :: i8;
-u8            :: u8;
-i16           :: i16;
-u16           :: u16;
-i32           :: i32;
-u32           :: u32;
-i64           :: i64;
-u64           :: u64;
-
-i128          :: i128;
-u128          :: u128;
-
-rune          :: rune;
-
-f16           :: f16;
-f32           :: f32;
-f64           :: f64;
-
-complex32     :: complex32;
-complex64     :: complex64;
-complex128    :: complex128;
-
-quaternion64  :: quaternion64;
-quaternion128 :: quaternion128;
-quaternion256 :: quaternion256;
-
-int           :: int;
-uint          :: uint;
-uintptr       :: uintptr;
-
-rawptr        :: rawptr;
-string        :: string;
-cstring       :: cstring;
-any           :: any;
-
-typeid        :: typeid;
+nil   :: nil
+false :: 0!=0
+true  :: 0==0
+
+ODIN_OS      :: ODIN_OS
+ODIN_ARCH    :: ODIN_ARCH
+ODIN_ENDIAN  :: ODIN_ENDIAN
+ODIN_VENDOR  :: ODIN_VENDOR
+ODIN_VERSION :: ODIN_VERSION
+ODIN_ROOT    :: ODIN_ROOT
+ODIN_DEBUG   :: ODIN_DEBUG
+
+byte :: u8 // alias
+
+bool          :: bool
+b8            :: b8
+b16           :: b16
+b32           :: b32
+b64           :: b64
+
+i8            :: i8
+u8            :: u8
+i16           :: i16
+u16           :: u16
+i32           :: i32
+u32           :: u32
+i64           :: i64
+u64           :: u64
+
+i128          :: i128
+u128          :: u128
+
+rune          :: rune
+
+f16           :: f16
+f32           :: f32
+f64           :: f64
+
+complex32     :: complex32
+complex64     :: complex64
+complex128    :: complex128
+
+quaternion64  :: quaternion64
+quaternion128 :: quaternion128
+quaternion256 :: quaternion256
+
+int           :: int
+uint          :: uint
+uintptr       :: uintptr
+
+rawptr        :: rawptr
+string        :: string
+cstring       :: cstring
+any           :: any
+
+typeid        :: typeid
 
 // Endian Specific Types
-i16le         :: i16le;
-u16le         :: u16le;
-i32le         :: i32le;
-u32le         :: u32le;
-i64le         :: i64le;
-u64le         :: u64le;
-i128le        :: i128le;
-u128le        :: u128le;
-
-i16be         :: i16be;
-u16be         :: u16be;
-i32be         :: i32be;
-u32be         :: u32be;
-i64be         :: i64be;
-u64be         :: u64be;
-i128be        :: i128be;
-u128be        :: u128be;
-
-
-f16le         :: f16le;
-f32le         :: f32le;
-f64le         :: f64le;
-
-f16be         :: f16be;
-f32be         :: f32be;
-f64be         :: f64be;
+i16le         :: i16le
+u16le         :: u16le
+i32le         :: i32le
+u32le         :: u32le
+i64le         :: i64le
+u64le         :: u64le
+i128le        :: i128le
+u128le        :: u128le
+
+i16be         :: i16be
+u16be         :: u16be
+i32be         :: i32be
+u32be         :: u32be
+i64be         :: i64be
+u64be         :: u64be
+i128be        :: i128be
+u128be        :: u128be
+
+
+f16le         :: f16le
+f32le         :: f32le
+f64le         :: f64le
+
+f16be         :: f16be
+f32be         :: f32be
+f64be         :: f64be
 
 
 

+ 8 - 0
core/bytes/buffer.odin

@@ -161,6 +161,10 @@ buffer_write :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
 	return copy(b.buf[m:], p), nil
 }
 
+buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.Error) {
+	return buffer_write(b, ([^]byte)(ptr)[:size])
+}
+
 buffer_write_string :: proc(b: ^Buffer, s: string) -> (n: int, err: io.Error) {
 	b.last_read = .Invalid
 	m, ok := _buffer_try_grow(b, len(s))
@@ -229,6 +233,10 @@ buffer_read :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
 	return
 }
 
+buffer_read_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.Error) {
+	return buffer_read(b, ([^]byte)(ptr)[:size])
+}
+
 buffer_read_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.Error) {
 	b.last_read = .Invalid
 
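The new `buffer_write_ptr`/`buffer_read_ptr` helpers view a raw pointer as a byte slice via the multi-pointer cast `([^]byte)(ptr)[:size]`. A small round-trip sketch, not part of this commit (the `Header` type is invented for illustration):

package example

import "core:bytes"

// Hypothetical payload type, purely for illustration.
Header :: struct {
	magic:   u32,
	version: u16,
}

round_trip :: proc() -> (h: Header, ok: bool) {
	src := Header{magic = 0xF00D, version = 1}

	b: bytes.Buffer
	defer bytes.buffer_destroy(&b)

	// Write the raw bytes of src, then read them back out.
	bytes.buffer_write_ptr(&b, &src, size_of(Header))
	n, err := bytes.buffer_read_ptr(&b, &h, size_of(Header))
	return h, err == nil && n == size_of(Header)
}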

+ 58 - 39
core/bytes/bytes.odin

@@ -5,13 +5,19 @@ import "core:unicode"
 import "core:unicode/utf8"
 
 clone :: proc(s: []byte, allocator := context.allocator, loc := #caller_location) -> []byte {
-	c := make([]byte, len(s)+1, allocator, loc)
+	c := make([]byte, len(s), allocator, loc)
 	copy(c, s)
-	c[len(s)] = 0
 	return c[:len(s)]
 }
 
-ptr_from_slice :: proc(str: []byte) -> ^byte {
+clone_safe :: proc(s: []byte, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: mem.Allocator_Error) {
+	c := make([]byte, len(s), allocator, loc) or_return
+	copy(c, s)
+	return c[:len(s)], nil
+}
+
+ptr_from_slice :: ptr_from_bytes
+ptr_from_bytes :: proc(str: []byte) -> ^byte {
 	d := transmute(mem.Raw_String)str
 	return d.data
 }
@@ -135,6 +141,25 @@ join :: proc(a: [][]byte, sep: []byte, allocator := context.allocator) -> []byte
 	return b
 }
 
+join_safe :: proc(a: [][]byte, sep: []byte, allocator := context.allocator) -> (data: []byte, err: mem.Allocator_Error) {
+	if len(a) == 0 {
+		return nil, nil
+	}
+
+	n := len(sep) * (len(a) - 1)
+	for s in a {
+		n += len(s)
+	}
+
+	b := make([]byte, n, allocator) or_return
+	i := copy(b, a[0])
+	for s in a[1:] {
+		i += copy(b[i:], sep)
+		i += copy(b[i:], s)
+	}
+	return b, nil
+}
+
 concatenate :: proc(a: [][]byte, allocator := context.allocator) -> []byte {
 	if len(a) == 0 {
 		return nil
@@ -152,6 +177,24 @@ concatenate :: proc(a: [][]byte, allocator := context.allocator) -> []byte {
 	return b
 }
 
+concatenate_safe :: proc(a: [][]byte, allocator := context.allocator) -> (data: []byte, err: mem.Allocator_Error) {
+	if len(a) == 0 {
+		return nil, nil
+	}
+
+	n := 0
+	for s in a {
+		n += len(s)
+	}
+	b := make([]byte, n, allocator) or_return
+	i := 0
+	for s in a {
+		i += copy(b[i:], s)
+	}
+	return b, nil
+}
+
+
 @private
 _split :: proc(s, sep: []byte, sep_save, n: int, allocator := context.allocator) -> [][]byte {
 	s, n := s, n
@@ -219,61 +262,37 @@ split_after_n :: proc(s, sep: []byte, n: int, allocator := context.allocator) ->
 
 
 @private
-_split_iterator :: proc(s: ^[]byte, sep: []byte, sep_save, n: int) -> (res: []byte, ok: bool) {
-	s, n := s, n
-
-	if n == 0 {
-		return
-	}
-
-	if sep == nil {
+_split_iterator :: proc(s: ^[]byte, sep: []byte, sep_save: int) -> (res: []byte, ok: bool) {
+	if len(sep) == 0 {
 		res = s[:]
 		ok = true
 		s^ = s[len(s):]
 		return
 	}
 
-	if n < 0 {
-		n = count(s^, sep) + 1
-	}
-
-	n -= 1
-
-	i := 0
-	for ; i < n; i += 1 {
-		m := index(s^, sep)
-		if m < 0 {
-			break
-		}
+	m := index(s^, sep)
+	if m < 0 {
+		// not found
+		res = s[:]
+		ok = len(res) != 0
+		s^ = s[len(s):]
+	} else {
 		res = s[:m+sep_save]
 		ok = true
 		s^ = s[m+len(sep):]
-		return
 	}
-	res = s[:]
-	ok = res != nil
-	s^ = s[len(s):]
 	return
 }
 
 
 split_iterator :: proc(s: ^[]byte, sep: []byte) -> ([]byte, bool) {
-	return _split_iterator(s, sep, 0, -1)
-}
-
-split_n_iterator :: proc(s: ^[]byte, sep: []byte, n: int) -> ([]byte, bool) {
-	return _split_iterator(s, sep, 0, n)
+	return _split_iterator(s, sep, 0)
 }
 
 split_after_iterator :: proc(s: ^[]byte, sep: []byte) -> ([]byte, bool) {
-	return _split_iterator(s, sep, len(sep), -1)
+	return _split_iterator(s, sep, len(sep))
 }
 
-split_after_n_iterator :: proc(s: ^[]byte, sep: []byte, n: int) -> ([]byte, bool) {
-	return _split_iterator(s, sep, len(sep), n)
-}
-
-
 
 index_byte :: proc(s: []byte, c: byte) -> int {
 	for i := 0; i < len(s); i += 1 {
@@ -1143,7 +1162,7 @@ fields_proc :: proc(s: []byte, f: proc(rune) -> bool, allocator := context.alloc
 	}
 
 	if start >= 0 {
-		append(&subslices, s[start : end])
+		append(&subslices, s[start : len(s)])
 	}
 
 	return subslices[:]
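
The added `*_safe` variants surface `mem.Allocator_Error` instead of assuming allocation always succeeds, so they compose with `or_return`. A minimal sketch, not part of this commit (the `frame` helper is hypothetical):

package example

import "core:bytes"
import "core:mem"

// Hypothetical helper: build a framed message, propagating any allocation
// failure to the caller instead of panicking.
frame :: proc(parts: [][]byte) -> (msg: []byte, err: mem.Allocator_Error) {
	sep := transmute([]byte)string(", ")
	body := bytes.join_safe(parts, sep) or_return
	defer delete(body)

	prefix := transmute([]byte)string("BEGIN ")
	msg = bytes.concatenate_safe({prefix, body}) or_return
	return
}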

+ 6 - 4
core/c/c.odin

@@ -3,22 +3,24 @@ package c
 import builtin "core:builtin"
 
 char           :: builtin.u8  // assuming -funsigned-char
+
+schar          :: builtin.i8
 short          :: builtin.i16
 int            :: builtin.i32
-long           :: builtin.i32 when (ODIN_OS == "windows" || size_of(builtin.rawptr) == 4) else builtin.i64
+long           :: builtin.i32 when (ODIN_OS == .Windows || size_of(builtin.rawptr) == 4) else builtin.i64
 longlong       :: builtin.i64
 
 uchar          :: builtin.u8
 ushort         :: builtin.u16
 uint           :: builtin.u32
-ulong          :: builtin.u32 when (ODIN_OS == "windows" || size_of(builtin.rawptr) == 4) else builtin.u64
+ulong          :: builtin.u32 when (ODIN_OS == .Windows || size_of(builtin.rawptr) == 4) else builtin.u64
 ulonglong      :: builtin.u64
 
 bool           :: builtin.bool
 
 size_t         :: builtin.uint
 ssize_t        :: builtin.int
-wchar_t        :: builtin.u16 when (ODIN_OS == "windows") else builtin.u32
+wchar_t        :: builtin.u16 when (ODIN_OS == .Windows) else builtin.u32
 
 float          :: builtin.f32
 double         :: builtin.f64
@@ -46,7 +48,7 @@ int_least64_t  :: builtin.i64
 uint_least64_t :: builtin.u64
 
 // Same on Windows, Linux, and FreeBSD
-when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
 	int_fast8_t    :: builtin.i8
 	uint_fast8_t   :: builtin.u8
 	int_fast16_t   :: builtin.i32
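
These hunks track the compiler change that turned `ODIN_OS` and `ODIN_ARCH` from strings into enum constants, so a misspelled platform name is now a compile error rather than a silently false string comparison. A minimal sketch of the new style, not part of this commit:

package example

// Sketch: platform checks against the new enum constants; a typo such as
// `.Windwos` would fail to compile instead of comparing false at runtime.
when ODIN_OS == .Windows {
	PATH_SEPARATOR :: '\\'
} else {
	PATH_SEPARATOR :: '/'
}

when ODIN_ARCH == .i386 {
	WORD_BITS :: 32
} else {
	WORD_BITS :: 64 // assumption: every other target of interest here is 64-bit
}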

+ 3 - 3
core/c/frontend/preprocessor/preprocess.odin

@@ -519,7 +519,7 @@ join_adjacent_string_literals :: proc(cpp: ^Preprocessor, initial_tok: ^Token) {
 
 
 quote_string :: proc(s: string) -> []byte {
-	b := strings.make_builder(0, len(s)+2)
+	b := strings.builder_make(0, len(s)+2)
 	io.write_quoted_string(strings.to_writer(&b), s, '"')
 	return b.buf[:]
 }
@@ -956,7 +956,7 @@ substitute_token :: proc(cpp: ^Preprocessor, tok: ^Token, args: ^Macro_Arg) -> ^
 			continue
 		}
 
-		if tok.lit == "__VA__OPT__" && tok.next.lit == "(" {
+		if tok.lit == "__VA_OPT__" && tok.next.lit == "(" {
 			opt_arg := read_macro_arg_one(cpp, &tok, tok.next.next, true)
 			if has_varargs(args) {
 				for t := opt_arg.tok; t.kind != .EOF; t = t.next {
@@ -1276,7 +1276,7 @@ preprocess_internal :: proc(cpp: ^Preprocessor, tok: ^Token) -> ^Token {
 				if start.file != nil {
 					dir = filepath.dir(start.file.name)
 				}
-				path := filepath.join(dir, filename)
+				path := filepath.join({dir, filename})
 				if os.exists(path) {
 					tok = include_file(cpp, tok, path, start.next.next)
 					continue
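
Two library changes surface in this hunk: `strings.make_builder` was renamed to `strings.builder_make`, and `filepath.join` now takes a slice of path elements. A small sketch of the updated call style, not part of this commit:

package example

import "core:path/filepath"
import "core:strings"

// Sketch of the renamed/changed calls used in the hunk above.
demo :: proc(dir, filename: string) -> string {
	b := strings.builder_make(0, 16) // was strings.make_builder
	defer strings.builder_destroy(&b)
	strings.write_string(&b, "including: ")
	strings.write_string(&b, filename)

	return filepath.join({dir, filename}) // join now takes a slice
}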

+ 3 - 1
core/c/libc/complex.odin

@@ -2,8 +2,10 @@ package libc
 
 // 7.3 Complex arithmetic
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

+ 3 - 1
core/c/libc/ctype.odin

@@ -1,7 +1,9 @@
 package libc
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

+ 32 - 3
core/c/libc/errno.odin

@@ -2,8 +2,10 @@ package libc
 
 // 7.5 Errors
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
@@ -12,7 +14,7 @@ when ODIN_OS == "windows" {
 //	EDOM,
 //	EILSEQ
 //	ERANGE
-when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
 	@(private="file")
 	@(default_calling_convention="c")
 	foreign libc {
@@ -25,7 +27,20 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
 	ERANGE :: 34
 }
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .OpenBSD {
+	@(private="file")
+	@(default_calling_convention="c")
+	foreign libc {
+		@(link_name="__errno")
+		_get_errno :: proc() -> ^int ---
+	}
+
+	EDOM   :: 33
+	EILSEQ :: 84
+	ERANGE :: 34
+}
+
+when ODIN_OS == .Windows {
 	@(private="file")
 	@(default_calling_convention="c")
 	foreign libc {
@@ -38,6 +53,20 @@ when ODIN_OS == "windows" {
 	ERANGE :: 34
 }
 
+when ODIN_OS == .Darwin {
+	@(private="file")
+	@(default_calling_convention="c")
+	foreign libc {
+		@(link_name="__error")
+		_get_errno :: proc() -> ^int ---
+	}
+
+	// Unknown
+	EDOM   :: 33
+	EILSEQ :: 92
+	ERANGE :: 34
+}
+
 // Odin has no way to make an identifier "errno" behave as a function call to
 // read the value, or to produce an lvalue such that you can assign a different
 // error value to errno. To work around this, just expose it as a function like
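
A minimal usage sketch, not part of this commit and assuming the package-level `errno` wrapper over `_get_errno` that the comment above describes:

package example

import "core:c/libc"

// Sketch, assuming libc.errno() returns ^int as described above: read it
// after a failing call, or assign through it to reset the error state.
check_errno :: proc() -> int {
	libc.errno()^ = 0 // clear before the call
	if libc.fopen("does-not-exist", "r") == nil {
		return libc.errno()^ // report the error code
	}
	return 0
}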

+ 13 - 11
core/c/libc/math.odin

@@ -4,8 +4,10 @@ package libc
 
 import "core:intrinsics"
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
@@ -209,19 +211,19 @@ _signbitf :: #force_inline proc(x: float) -> int {
 	return int(transmute(uint32_t)x >> 31)
 }
 
-isfinite :: #force_inline proc(x: $T) where intrinsics.type_is_float(T) {
+isfinite :: #force_inline proc(x: $T) -> bool where intrinsics.type_is_float(T) {
 	return fpclassify(x) == FP_INFINITE
 }
 
-isinf :: #force_inline proc(x: $T) where intrinsics.type_is_float(T) {
+isinf :: #force_inline proc(x: $T) -> bool where intrinsics.type_is_float(T) {
 	return fpclassify(x) > FP_INFINITE
 }
 
-isnan :: #force_inline proc(x: $T) where intrinsics.type_is_float(T) {
+isnan :: #force_inline proc(x: $T) -> bool where intrinsics.type_is_float(T) {
 	return fpclassify(x) == FP_NAN
 }
 
-isnormal :: #force_inline proc(x: $T) where intrinsics.type_is_float(T) {
+isnormal :: #force_inline proc(x: $T) -> bool where intrinsics.type_is_float(T) {
 	return fpclassify(x) == FP_NORMAL
 }
 
@@ -229,27 +231,27 @@ isnormal :: #force_inline proc(x: $T) where intrinsics.type_is_float(T) {
 // implemented as the relational comparisons, as that would produce an invalid
 // "sticky" state that propagates and affects maths results. These need
 // to be implemented natively in Odin assuming isunordered to prevent that.
-isgreater :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+isgreater :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	return !isunordered(x, y) && x > y
 }
 
-isgreaterequal :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+isgreaterequal :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	return !isunordered(x, y) && x >= y
 }
 
-isless :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+isless :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	return !isunordered(x, y) && x < y
 }
 
-islessequal :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+islessequal :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	return !isunordered(x, y) && x <= y
 }
 
-islessgreater :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+islessgreater :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	return !isunordered(x, y) && x <= y
 }
 
-isunordered :: #force_inline proc(x, y: $T) where intrinsics.type_is_float(T) {
+isunordered :: #force_inline proc(x, y: $T) -> bool where intrinsics.type_is_float(T) {
 	if isnan(x) {
 		// Force evaluation of y to propagate exceptions for ordering semantics.
 		// To ensure correct semantics of IEEE 754 this cannot be compiled away.
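
With the added `-> bool` return types, these classifiers and comparisons can finally be used as conditions. A minimal sketch, not part of this commit:

package example

import "core:c/libc"

// Sketch: the classifiers now return bool, so they work directly as
// conditions.
smaller :: proc(x, y: f64) -> f64 {
	if libc.isunordered(x, y) { // at least one operand is NaN
		return 0
	}
	return x if libc.isless(x, y) else y
}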

+ 4 - 3
core/c/libc/setjmp.odin

@@ -2,13 +2,14 @@ package libc
 
 // 7.13 Nonlocal jumps
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
-
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	@(default_calling_convention="c")
 	foreign libc {
 		// 7.13.1 Save calling environment

+ 18 - 3
core/c/libc/signal.odin

@@ -2,8 +2,10 @@ package libc
 
 // 7.14 Signal handling
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
@@ -19,7 +21,7 @@ foreign libc {
 	raise  :: proc(sig: int) -> int ---
 }
 
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	SIG_ERR :: rawptr(~uintptr(0)) 
 	SIG_DFL :: rawptr(uintptr(0))
 	SIG_IGN :: rawptr(uintptr(1))
@@ -32,7 +34,7 @@ when ODIN_OS == "windows" {
 	SIGTERM :: 15
 }
 
-when ODIN_OS == "linux" || ODIN_OS == "freebsd" || ODIN_OS == "darwin" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
+	SIG_ERR  :: rawptr(~uintptr(0))
+	SIG_DFL  :: rawptr(uintptr(0))
+	SIG_IGN  :: rawptr(uintptr(1)) 
+
+	SIGABRT  :: 6
+	SIGFPE   :: 8
+	SIGILL   :: 4
+	SIGINT   :: 2
+	SIGSEGV  :: 11
+	SIGTERM  :: 15
+}
+
+when ODIN_OS == .Darwin {
 	SIG_ERR  :: rawptr(~uintptr(0))
 	SIG_DFL  :: rawptr(uintptr(0))
 	SIG_IGN  :: rawptr(uintptr(1)) 
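
A minimal sketch of exercising these bindings, not part of this commit; `raise` and the `SIGINT` constant are the ones declared above:

package example

import "core:c/libc"

// Sketch: the constants now come from per-OS `when` blocks like the ones
// above, while raise is the binding declared in the same file.
trigger :: proc() -> int {
	return libc.raise(libc.SIGINT)
}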

+ 111 - 146
core/c/libc/stdatomic.odin

@@ -47,29 +47,30 @@ kill_dependency :: #force_inline proc(value: $T) -> T {
 
 
 // 7.17.4 Fences
 // 7.17.4 Fences
 atomic_thread_fence :: #force_inline proc(order: memory_order) {
 atomic_thread_fence :: #force_inline proc(order: memory_order) {
-	switch (order) {
-	case .relaxed:
-		return
-	case .consume:
-		intrinsics.atomic_fence_acq()
-	case .acquire:
-		intrinsics.atomic_fence_acq()
-	case .release:
-		intrinsics.atomic_fence_rel()
-	case .acq_rel:
-		intrinsics.atomic_fence_acqrel()
-	case .seq_cst:
-		intrinsics.atomic_fence_acqrel()
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_thread_fence(.Acquire)
+	case .release: intrinsics.atomic_thread_fence(.Release)
+	case .acq_rel: intrinsics.atomic_thread_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_thread_fence(.Seq_Cst)
 	}
 	}
 }
 }
 
 
 atomic_signal_fence :: #force_inline proc(order: memory_order) {
 atomic_signal_fence :: #force_inline proc(order: memory_order) {
-	atomic_thread_fence(order)
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_signal_fence(.Acquire)
+	case .release: intrinsics.atomic_signal_fence(.Release)
+	case .acq_rel: intrinsics.atomic_signal_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_signal_fence(.Seq_Cst)
+	}
 }
 }
 
 
 // 7.17.5 Lock-free property
 // 7.17.5 Lock-free property
 atomic_is_lock_free :: #force_inline proc(obj: ^$T) -> bool {
 atomic_is_lock_free :: #force_inline proc(obj: ^$T) -> bool {
-	return size_of(T) <= 8 && (intrinsics.type_is_integer(T) || intrinsics.type_is_pointer(T))
+	return intrinsics.atomic_type_is_lock_free(T)
 }
 }
 
 
 // 7.17.6 Atomic integer types
 // 7.17.6 Atomic integer types
@@ -121,13 +122,10 @@ atomic_store_explicit :: #force_inline proc(object: ^$T, desired: T, order: memo
 	assert(order != .acquire)
 	assert(order != .acquire)
 	assert(order != .acq_rel)
 	assert(order != .acq_rel)
 
 
-	#partial switch (order) {
-	case .relaxed:
-		intrinsics.atomic_store_relaxed(object, desired)
-	case .release:
-		intrinsics.atomic_store_rel(object, desired)
-	case .seq_cst:
-		intrinsics.atomic_store(object, desired)
+	#partial switch order {
+	case .relaxed: intrinsics.atomic_store_explicit(object, desired, .Relaxed)
+	case .release: intrinsics.atomic_store_explicit(object, desired, .Release)
+	case .seq_cst: intrinsics.atomic_store_explicit(object, desired, .Seq_Cst)
 	}
 	}
 }
 }
 
 
@@ -139,36 +137,26 @@ atomic_load_explicit :: #force_inline proc(object: ^$T, order: memory_order) {
 	assert(order != .release)
 	assert(order != .release)
 	assert(order != .acq_rel)
 	assert(order != .acq_rel)
 
 
-	#partial switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_load_relaxed(object)
-	case .consume:
-		return intrinsics.atomic_load_acq(object)
-	case .acquire:
-		return intrinsics.atomic_load_acq(object)
-	case .seq_cst:
-		return intrinsics.atomic_load(object)
+	#partial switch order {
+	case .relaxed: return intrinsics.atomic_load_explicit(object, .Relaxed)
+	case .consume: return intrinsics.atomic_load_explicit(object, .Consume)
+	case .acquire: return intrinsics.atomic_load_explicit(object, .Acquire)
+	case .seq_cst: return intrinsics.atomic_load_explicit(object, .Seq_Cst)
 	}
 	}
 }
 }
 
 
 atomic_exchange :: #force_inline proc(object: ^$T, desired: T) -> T {
 atomic_exchange :: #force_inline proc(object: ^$T, desired: T) -> T {
-	return intrinsics.atomic_xchg(object, desired)
+	return intrinsics.atomic_exchange(object, desired)
 }
 }
 
 
 atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
 atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xchg_relaxed(object, desired)
-	case .consume:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .acquire:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .release:
-		return intrinsics.atomic_xchg_rel(object, desired)
-	case .acq_rel:
-		return intrinsics.atomic_xchg_acqrel(object, desired)
-	case .seq_cst:
-		return intrinsics.atomic_xchg(object, desired)
+	switch order {
+	case .relaxed: return intrinsics.atomic_exchange_explicit(object, desired, .Relaxed)
+	case .consume: return intrinsics.atomic_exchange_explicit(object, desired, .Consume)
+	case .acquire: return intrinsics.atomic_exchange_explicit(object, desired, .Acquire)
+	case .release: return intrinsics.atomic_exchange_explicit(object, desired, .Release)
+	case .acq_rel: return intrinsics.atomic_exchange_explicit(object, desired, .Acq_Rel)
+	case .seq_cst: return intrinsics.atomic_exchange_explicit(object, desired, .Seq_Cst)
 	}
 	}
 	return false
 	return false
 }
 }
@@ -189,102 +177,104 @@ atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: m
 // 	[success = seq_cst, failure = acquire] => failacq
 // 	[success = seq_cst, failure = acquire] => failacq
 // 	[success = acquire, failure = relaxed] => acq_failrelaxed
 // 	[success = acquire, failure = relaxed] => acq_failrelaxed
 // 	[success = acq_rel, failure = relaxed] => acqrel_failrelaxed
 // 	[success = acq_rel, failure = relaxed] => acqrel_failrelaxed
-atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_strong(object, expected^, desired)
 	if !ok { expected^ = value } 
 	if !ok { expected^ = value } 
 	return ok
 	return ok
 }
 }
 
 
-atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) {
+atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .release)
 	assert(failure != .acq_rel)
 	assert(failure != .acq_rel)
 
 
 	value: T; ok: bool
 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 	case .seq_cst:
 		assert(success != .relaxed)
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchg_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchg_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchg_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Acquire)

 	}
 	if !ok { expected^ = value }
 	return ok
 }

-atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_weak(object, expected^, desired)
 	if !ok { expected^ = value }
 	return ok
 }

-atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desited: T, success, failure: memory_order) {
+atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)

 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchgweak_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchgweak_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchgweak_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Acquire)

 	}
 	if !ok { expected^ = value }
@@ -297,19 +287,14 @@ atomic_fetch_add :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_add_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_add_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_add_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_add_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_add(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_add_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_add_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_add_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_add_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_add_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_add_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -318,19 +303,14 @@ atomic_fetch_sub :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_sub_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_sub_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_sub_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_sub_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_sub(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_sub_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_sub_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_sub_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_sub_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_sub_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_sub_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -339,19 +319,14 @@ atomic_fetch_or :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_or_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_or_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_or_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_or_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_or(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_or_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_or_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_or_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_or_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_or_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_or_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -360,19 +335,14 @@ atomic_fetch_xor :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_xor_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xor_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_xor_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_xor_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_xor(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_xor_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_xor_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_xor_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_xor_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_xor_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_xor_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -380,19 +350,14 @@ atomic_fetch_and :: #force_inline proc(object: ^$T, operand: T) -> T {
 	return intrinsics.atomic_and(object, operand)
 }
 atomic_fetch_and_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_and_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_and_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_and_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_and(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_and_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_and_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_and_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_and_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_and_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_and_explicit(object, operand, .Seq_Cst)
 	}
 }
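
For reference, a minimal usage sketch of the wrappers above, assuming the package is imported as `core:c/libc` (the proc names and `memory_order` members are the ones defined in this file):

package example

import "core:c/libc"

main :: proc() {
	counter: int

	// Atomic increment with relaxed ordering; returns the previous value.
	prev := libc.atomic_fetch_add_explicit(&counter, 1, .relaxed)

	// Strong CAS: returns true on success; on failure `expected` is
	// rewritten with the value actually observed.
	expected := prev + 1
	if libc.atomic_compare_exchange_strong(&counter, &expected, 42) {
		// counter is now 42
	}
}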
 

+ 88 - 6
core/c/libc/stdio.odin

@@ -1,7 +1,9 @@
 package libc

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
@@ -11,7 +13,7 @@ when ODIN_OS == "windows" {
 FILE :: struct {}

 // MSVCRT compatible.
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	_IOFBF       :: 0x0000
 	_IONBF       :: 0x0004
 	_IOLBF       :: 0x0040
@@ -46,7 +48,7 @@ when ODIN_OS == "windows" {
 }

 // GLIBC and MUSL compatible.
-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	fpos_t        :: struct #raw_union { _: [16]char, _: longlong, _: double, }

 	_IOFBF        :: 0
@@ -67,7 +69,7 @@ when ODIN_OS == "linux" {
 	SEEK_CUR      :: 1
 	SEEK_END      :: 2

-	TMP_MAX       :: 10000
+	TMP_MAX       :: 308915776

 	foreign libc {
 		stderr: ^FILE
@@ -76,6 +78,86 @@ when ODIN_OS == "linux" {
 	}
 }

+when ODIN_OS == .OpenBSD {
+	fpos_t :: distinct i64
+
+	_IOFBF :: 0
+	_IOLBF :: 1
+	_IONBF :: 2
+
+	BUFSIZ :: 1024
+
+	EOF :: int(-1)
+
+	FOPEN_MAX	:: 20
+	FILENAME_MAX	:: 1024
+
+	SEEK_SET :: 0
+	SEEK_CUR :: 1
+	SEEK_END :: 2
+
+	foreign libc {
+		stderr: ^FILE
+		stdin:  ^FILE
+		stdout: ^FILE
+	}
+}
+
+when ODIN_OS == .FreeBSD {
+	fpos_t :: distinct i64
+
+	_IOFBF :: 0
+	_IOLBF :: 1
+	_IONBF :: 2
+
+	BUFSIZ :: 1024
+
+	EOF :: int(-1)
+
+	FOPEN_MAX	:: 20
+	FILENAME_MAX	:: 1024
+
+	SEEK_SET :: 0
+	SEEK_CUR :: 1
+	SEEK_END :: 2
+
+	foreign libc {
+		stderr: ^FILE
+		stdin:  ^FILE
+		stdout: ^FILE
+	}
+}
+
+when ODIN_OS == .Darwin {
+	fpos_t :: distinct i64
+	
+	_IOFBF        :: 0
+	_IOLBF        :: 1
+	_IONBF        :: 2
+
+	BUFSIZ        :: 1024
+
+	EOF           :: int(-1)
+
+	FOPEN_MAX     :: 20
+
+	FILENAME_MAX  :: 1024
+
+	L_tmpnam      :: 1024
+
+	SEEK_SET      :: 0
+	SEEK_CUR      :: 1
+	SEEK_END      :: 2
+
+	TMP_MAX       :: 308915776
+
+	foreign libc {
+		@(link_name="__stderrp") stderr: ^FILE
+		@(link_name="__stdinp")  stdin:  ^FILE
+		@(link_name="__stdoutp") stdout: ^FILE
+	}
+}
+
 @(default_calling_convention="c")
 foreign libc {
 	// 7.21.4 Operations on files
@@ -114,10 +196,10 @@ foreign libc {
 	getc      :: proc(stream: ^FILE) -> int ---
 	getchar   :: proc() -> int ---
 	putc      :: proc(c: int, stream: ^FILE) -> int ---
-	putchar   :: proc() -> int ---
+	putchar   :: proc(c: int) -> int ---
 	puts      :: proc(s: cstring) -> int ---
 	ungetc    :: proc(c: int, stream: ^FILE) -> int ---
-	fread     :: proc(ptr: rawptr, size: size_t, stream: ^FILE) -> size_t ---
+	fread     :: proc(ptr: rawptr, size: size_t, nmemb: size_t, stream: ^FILE) -> size_t ---
 	fwrite    :: proc(ptr: rawptr, size: size_t, nmemb: size_t, stream: ^FILE) -> size_t ---

 	// 7.21.9 File positioning functions
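
With `fread` now taking `size` and `nmemb` separately (matching C) and `putchar` taking its character argument, a quick usage sketch, assuming the package is imported as `core:c/libc` and that the usual 7.21 file-access declarations (`fopen`, `fclose`) are present in this file:

package example

import "core:c/libc"

main :: proc() {
	f := libc.fopen("data.bin", "rb")
	if f == nil { return }
	defer libc.fclose(f)

	buf: [256]u8
	// Read up to len(buf) elements of size 1; returns the element count actually read.
	n := libc.fread(&buf[0], 1, len(buf), f)
	for i in 0 ..< int(n) {
		libc.putchar(libc.int(buf[i]))
	}
}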

+ 22 - 4
core/c/libc/stdlib.odin

@@ -2,13 +2,15 @@ package libc

 // 7.22 General utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	RAND_MAX :: 0x7fff

 	@(private="file")
@@ -22,7 +24,7 @@ when ODIN_OS == "windows" {
 	}
 }

-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	RAND_MAX :: 0x7fffffff

 	// GLIBC and MUSL only
@@ -33,7 +35,23 @@ when ODIN_OS == "linux" {
 	}

 	MB_CUR_MAX :: #force_inline proc() -> size_t {
-		return __ctype_get_mb_cur_max()
+		return size_t(__ctype_get_mb_cur_max())
+	}
+}
+
+
+when ODIN_OS == .Darwin {
+	RAND_MAX :: 0x7fffffff
+
+	// Darwin (libSystem) only
+	@(private="file")
+	@(default_calling_convention="c")
+	foreign libc {
+		___mb_cur_max :: proc() -> int ---
+	}
+
+	MB_CUR_MAX :: #force_inline proc() -> size_t {
+		return size_t(___mb_cur_max())
 	}
 }
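
Both the Linux and Darwin shims expose `MB_CUR_MAX` as a small callable rather than a constant; a usage sketch, assuming `core:c/libc`:

package example

import "core:c/libc"

main :: proc() {
	// Maximum bytes in a multibyte character for the current locale.
	n := libc.MB_CUR_MAX()
	assert(n >= 1)
}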
 

+ 3 - 1
core/c/libc/string.odin

@@ -4,8 +4,10 @@ import "core:runtime"

 // 7.24 String handling

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

+ 9 - 4
core/c/libc/threads.odin

@@ -5,10 +5,10 @@ package libc
 thrd_start_t :: proc "c" (rawptr) -> int
 tss_dtor_t   :: proc "c" (rawptr)

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc {
 		"system:libucrt.lib",
-		"system:msvcprt.lib"
+		"system:msvcprt.lib",
 	}

 	thrd_success        :: 0                             // _Thrd_success
@@ -74,10 +74,10 @@ when ODIN_OS == "windows" {
 }

 // GLIBC and MUSL compatible constants and types.
-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	foreign import libc {
 		"system:c",
-		"system:pthread"
+		"system:pthread",
 	}

 	thrd_success        :: 0
@@ -136,3 +136,8 @@ when ODIN_OS == "linux" {
 		tss_set       :: proc(key: tss_t, val: rawptr) -> int ---
 	}
 }
+
+
+when ODIN_OS == .Darwin {
+	// TODO: find out what this is meant to be!
+}

+ 13 - 6
core/c/libc/time.odin

@@ -2,15 +2,17 @@ package libc

 // 7.27 Date and time

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

 // We enforce 64-bit time_t and timespec as there is no reason to use 32-bit as
 // we approach the 2038 problem. Windows has defaulted to this since VC8 (2005).
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign libc {
 		// 7.27.2 Time manipulation functions
 		                               clock        :: proc() -> clock_t ---
@@ -43,7 +45,7 @@ when ODIN_OS == "windows" {
 	}
 }

-when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Darwin || ODIN_OS == .OpenBSD {
 	@(default_calling_convention="c")
 	foreign libc {
 		// 7.27.2 Time manipulation functions
@@ -61,7 +63,12 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
 		strftime     :: proc(s: [^]char, maxsize: size_t, format: cstring, timeptr: ^tm) -> size_t ---
 	}

-	CLOCKS_PER_SEC :: 1000000
+	when ODIN_OS == .OpenBSD {
+		CLOCKS_PER_SEC :: 100
+	} else {
+		CLOCKS_PER_SEC :: 1000000
+	}
+
 	TIME_UTC       :: 1

 	time_t         :: distinct i64
@@ -75,7 +82,7 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" {

 	tm :: struct {
 		tm_sec, tm_min, tm_hour, tm_mday, tm_mon, tm_year, tm_wday, tm_yday, tm_isdst: int,
-		_: long,
-		_: rawptr,
+		tm_gmtoff: long,
+		tm_zone: rawptr,
 	}
 }
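
With the two extension fields now named (`tm_gmtoff`, `tm_zone`) instead of anonymous padding, the UTC offset can be read directly; a sketch, assuming `core:c/libc` and that `localtime` is among the 7.27.3 declarations in this file:

package example

import "core:c/libc"

main :: proc() {
	now := libc.time(nil)
	t := libc.localtime(&now)
	// Offset from UTC in seconds (BSD/GNU extension field).
	offset_hours := int(t.tm_gmtoff) / 3600
	_ = offset_hours
}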

+ 2 - 0
core/c/libc/types.odin

@@ -3,6 +3,8 @@ package libc
 import "core:c"

 char           :: c.char // assuming -funsigned-char
+
+schar          :: c.schar
 short          :: c.short
 int            :: c.int
 long           :: c.long

+ 3 - 1
core/c/libc/uchar.odin

@@ -2,8 +2,10 @@ package libc

 // 7.28 Unicode utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

+ 3 - 1
core/c/libc/wchar.odin

@@ -2,8 +2,10 @@ package libc

 // 7.29 Extended multibyte and wide character utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

+ 18 - 4
core/c/libc/wctype.odin

@@ -2,20 +2,34 @@ package libc

 // 7.30 Wide character classification and mapping utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
+} else when ODIN_OS == .Darwin {
+	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	wctrans_t :: distinct wchar_t
 	wctype_t  :: distinct ushort
-}
 
-when ODIN_OS == "linux" {
+} else when ODIN_OS == .Linux {
+	wctrans_t :: distinct intptr_t
+	wctype_t  :: distinct ulong
+
+} else when ODIN_OS == .Darwin {
+	wctrans_t :: distinct int
+	wctype_t  :: distinct u32
+
+} else when ODIN_OS == .OpenBSD {
 	wctrans_t :: distinct rawptr
+	wctype_t  :: distinct rawptr
+
+} else when ODIN_OS == .FreeBSD {
+	wctrans_t :: distinct int
 	wctype_t  :: distinct ulong
 	wctype_t  :: distinct ulong
 }
 }

 @(default_calling_convention="c")

+ 16 - 6
core/compress/common.odin

@@ -5,6 +5,9 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation, optimization.
 */
+
+
+// package compress is a collection of utilities to aid with other compression packages
 package compress

 import "core:io"
@@ -44,7 +47,7 @@ when size_of(uintptr) == 8 {
 }


-Error :: union {
+Error :: union #shared_nil {
 	General_Error,
 	Deflate_Error,
 	ZLIB_Error,
@@ -55,6 +58,7 @@ Error :: union {
 }

 General_Error :: enum {
+	None = 0,
 	File_Not_Found,
 	Cannot_Open_File,
 	File_Too_Short,
@@ -73,6 +77,7 @@ General_Error :: enum {
 }

 GZIP_Error :: enum {
+	None = 0,
 	Invalid_GZIP_Signature,
 	Reserved_Flag_Set,
 	Invalid_Extra_Data,
@@ -97,6 +102,7 @@ GZIP_Error :: enum {
 }

 ZIP_Error :: enum {
+	None = 0,
 	Invalid_ZIP_File_Signature,
 	Unexpected_Signature,
 	Insert_Next_Disk,
@@ -104,6 +110,7 @@ ZIP_Error :: enum {
 }

 ZLIB_Error :: enum {
+	None = 0,
 	Unsupported_Window_Size,
 	FDICT_Unsupported,
 	Unsupported_Compression_Level,
@@ -111,6 +118,7 @@ ZLIB_Error :: enum {
 }

 Deflate_Error :: enum {
+	None = 0,
 	Huffman_Bad_Sizes,
 	Huffman_Bad_Code_Lengths,
 	Inflate_Error,
@@ -120,7 +128,6 @@ Deflate_Error :: enum {
 	BType_3,
 }

-
 // General I/O context for ZLIB, LZW, etc.
 Context_Memory_Input :: struct #packed {
 	input_data:        []u8,
@@ -136,7 +143,12 @@ Context_Memory_Input :: struct #packed {
 	size_packed:       i64,
 	size_unpacked:     i64,
 }
-#assert(size_of(Context_Memory_Input) == 64)
+when size_of(rawptr) == 8 {
+	#assert(size_of(Context_Memory_Input) == 64)
+} else {
+	// e.g. `-target:windows_i386`
+	#assert(size_of(Context_Memory_Input) == 52)
+}

 Context_Stream_Input :: struct #packed {
 	input_data:        []u8,
@@ -171,8 +183,6 @@ Context_Stream_Input :: struct #packed {
 	This simplifies end-of-stream handling where bits may be left in the bit buffer.
 */

-// TODO: Make these return compress.Error errors.
-
 input_size_from_memory :: proc(z: ^Context_Memory_Input) -> (res: i64, err: Error) {
 	return i64(len(z.input_data)), nil
 }
@@ -470,4 +480,4 @@ discard_to_next_byte_lsb_from_stream :: proc(z: ^Context_Stream_Input) {
 	consume_bits_lsb(z, discard)
 }

-discard_to_next_byte_lsb :: proc{discard_to_next_byte_lsb_from_memory, discard_to_next_byte_lsb_from_stream};
+discard_to_next_byte_lsb :: proc{discard_to_next_byte_lsb_from_memory, discard_to_next_byte_lsb_from_stream}
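
The switch to `#shared_nil` (together with the explicit `None = 0` members added above) makes the zero value of every variant collapse into the union's single `nil`, so callers can test `err == nil` without caring which error enum produced the value. A small sketch of that property (`do_work` is hypothetical):

package example

import "core:compress"

do_work :: proc(ok: bool) -> compress.Error {
	if !ok {
		return compress.General_Error.Stream_Too_Short
	}
	return nil // with #shared_nil, the same state as any variant's .None
}

main :: proc() {
	err := do_work(true)
	assert(err == nil)
}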

+ 1 - 1
core/compress/gzip/example.odin

@@ -45,7 +45,7 @@ main :: proc() {

 	if len(args) < 2 {
 		stderr("No input file specified.\n")
-		err := load(slice=TEST, buf=&buf, known_gzip_size=len(TEST))
+		err := load(data=TEST, buf=&buf, known_gzip_size=len(TEST))
 		if err == nil {
 			stdout("Displaying test vector: ")
 			stdout(bytes.buffer_to_string(&buf))

+ 8 - 7
core/compress/gzip/gzip.odin

@@ -66,7 +66,8 @@ OS :: enum u8 {
 	_Unknown     = 14,
 	Unknown      = 255,
 }
-OS_Name :: #partial [OS]string{
+OS_Name :: #sparse[OS]string{
+	._Unknown     = "",
 	.FAT          = "FAT",
 	.Amiga        = "Amiga",
 	.VMS          = "VMS/OpenVMS",
@@ -99,9 +100,9 @@ E_GZIP    :: compress.GZIP_Error
 E_ZLIB    :: compress.ZLIB_Error
 E_Deflate :: compress.Deflate_Error

-GZIP_MAX_PAYLOAD_SIZE :: int(max(u32le))
+GZIP_MAX_PAYLOAD_SIZE :: i64(max(u32le))
 
-load :: proc{load_from_slice, load_from_file, load_from_context}
+load :: proc{load_from_bytes, load_from_file, load_from_context}
 
 load_from_file :: proc(filename: string, buf: ^bytes.Buffer, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
 	context.allocator = allocator
@@ -111,16 +112,16 @@ load_from_file :: proc(filename: string, buf: ^bytes.Buffer, expected_output_siz

 	err = E_General.File_Not_Found
 	if ok {
-		err = load_from_slice(data, buf, len(data), expected_output_size)
+		err = load_from_bytes(data, buf, len(data), expected_output_size)
 	}
 	return
 }
 
-load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
+load_from_bytes :: proc(data: []byte, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
 	buf := buf

 	z := &compress.Context_Memory_Input{
-		input_data = slice,
+		input_data = data,
 		output = buf,
 	}
 	return load_from_context(z, buf, known_gzip_size, expected_output_size, allocator)
@@ -135,7 +136,7 @@ load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, exp

 	z.output = buf

-	if expected_output_size > GZIP_MAX_PAYLOAD_SIZE {
+	if i64(expected_output_size) > i64(GZIP_MAX_PAYLOAD_SIZE) {
 		return E_GZIP.Payload_Size_Exceeds_Max_Payload
 	}
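
A usage sketch of the renamed overload set, assuming `core:compress/gzip` and `core:bytes`:

package example

import "core:bytes"
import "core:compress/gzip"

main :: proc() {
	buf: bytes.Buffer
	defer bytes.buffer_destroy(&buf)

	// `load` is a proc group; a filename first argument resolves to load_from_file.
	if err := gzip.load("hello.gz", &buf); err == nil {
		_ = bytes.buffer_to_string(&buf)
	}
}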
 

+ 148 - 0
core/compress/shoco/model.odin

@@ -0,0 +1,148 @@
+/*
+	This file was generated, so don't edit this by hand.
+	Transliterated from https://github.com/Ed-von-Schleck/shoco/blob/master/shoco_model.h,
+	which is an English word model.
+*/
+
+// package shoco is an implementation of the shoco short string compressor
+package shoco
+
+DEFAULT_MODEL :: Shoco_Model {
+	min_char = 39,
+	max_char = 122,
+	characters_by_id = {
+		'e', 'a', 'i', 'o', 't', 'h', 'n', 'r', 's', 'l', 'u', 'c', 'w', 'm', 'd', 'b', 'p', 'f', 'g', 'v', 'y', 'k', '-', 'H', 'M', 'T', '\'', 'B', 'x', 'I', 'W', 'L',
+	},
+	ids_by_character = {
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 27, -1, -1, -1, -1, -1, 23, 29, -1, -1, 31, 24, -1, -1, -1, -1, -1, -1, 25, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 15, 11, 14, 0, 17, 18, 5, 2, -1, 21, 9, 13, 6, 3, 16, -1, 7, 8, 4, 10, 19, 12, 28, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+	},
+	successors_by_bigram = {
+		7, 4, 12, -1, 6, -1, 1, 0, 3, 5, -1, 9, -1, 8, 2, -1, 15, 14, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1,
+		1, -1, 6, -1, 1, -1, 0, 3, 2, 4, 15, 11, -1, 9, 5, 10, 13, -1, 12, 8, 7, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		9, 11, -1, 4, 2, -1, 0, 8, 1, 5, -1, 6, -1, 3, 7, 15, -1, 12, 10, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		-1, -1, 14, 7, 5, -1, 1, 2, 8, 9, 0, 15, 6, 4, 11, -1, 12, 3, -1, 10, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		2, 4, 3, 1, 5, 0, -1, 6, 10, 9, 7, 12, 11, -1, -1, -1, -1, 13, -1, -1, 8, -1, 15, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		0, 1, 2, 3, 4, -1, -1, 5, 9, 10, 6, -1, -1, 8, 15, 11, -1, 14, -1, -1, 7, -1, 13, -1, -1, -1, 12, -1, -1, -1, -1, -1,
+		2, 8, 7, 4, 3, -1, 9, -1, 6, 11, -1, 5, -1, -1, 0, -1, -1, 14, 1, 15, 10, 12, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1,
+		0, 3, 1, 2, 6, -1, 9, 8, 4, 12, 13, 10, -1, 11, 7, -1, -1, 15, 14, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 6, 3, 4, 1, 2, -1, -1, 5, 10, 7, 9, 11, 12, -1, -1, 8, 14, -1, -1, 15, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 6, 2, 5, 9, -1, -1, -1, 10, 1, 8, -1, 12, 14, 4, -1, 15, 7, -1, 13, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		8, 10, 9, 15, 1, -1, 4, 0, 3, 2, -1, 6, -1, 12, 11, 13, 7, 14, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		1, 3, 6, 0, 4, 2, -1, 7, 13, 8, 9, 11, -1, -1, 15, -1, -1, -1, -1, -1, 10, 5, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		3, 0, 1, 4, -1, 2, 5, 6, 7, 8, -1, 14, -1, -1, 9, 15, -1, 12, -1, -1, -1, 10, 11, -1, -1, -1, 13, -1, -1, -1, -1, -1,
+		0, 1, 3, 2, 15, -1, 12, -1, 7, 14, 4, -1, -1, 9, -1, 8, 5, 10, -1, -1, 6, -1, 13, -1, -1, -1, 11, -1, -1, -1, -1, -1,
+		0, 3, 1, 2, -1, -1, 12, 6, 4, 9, 7, -1, -1, 14, 8, -1, -1, 15, 11, 13, 5, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 5, 7, 2, 10, 13, -1, 6, 8, 1, 3, -1, -1, 14, 15, 11, -1, -1, -1, 12, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 2, 6, 3, 7, 10, -1, 1, 9, 4, 8, -1, -1, 15, -1, 12, 5, -1, -1, -1, 11, -1, 13, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		1, 3, 4, 0, 7, -1, 12, 2, 11, 8, 6, 13, -1, -1, -1, -1, -1, 5, -1, -1, 10, 15, 9, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		1, 3, 5, 2, 13, 0, 9, 4, 7, 6, 8, -1, -1, 15, -1, 11, -1, -1, 10, -1, 14, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 2, 1, 3, -1, -1, -1, 6, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		1, 11, 4, 0, 3, -1, 13, 12, 2, 7, -1, -1, 15, 10, 5, 8, 14, -1, -1, -1, -1, -1, 9, -1, -1, -1, 6, -1, -1, -1, -1, -1,
+		0, 9, 2, 14, 15, 4, 1, 13, 3, 5, -1, -1, 10, -1, -1, -1, -1, 6, 12, -1, 7, -1, 8, -1, -1, -1, 11, -1, -1, -1, -1, -1,
+		-1, 2, 14, -1, 1, 5, 8, 7, 4, 12, -1, 6, 9, 11, 13, 3, 10, 15, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 1, 3, 2, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		4, 3, 1, 5, -1, -1, -1, 0, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		2, 8, 4, 1, -1, 0, -1, 6, -1, -1, 5, -1, 7, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, 9, -1, -1, -1, -1, -1, -1, -1, -1,
+		12, 5, -1, -1, 1, -1, -1, 7, 0, 3, -1, 2, -1, 4, 6, -1, -1, -1, -1, 8, -1, -1, 15, -1, 13, 9, -1, -1, -1, -1, -1, 11,
+		1, 3, 2, 4, -1, -1, -1, 5, -1, 7, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1,
+		5, 3, 4, 12, 1, 6, -1, -1, -1, -1, 8, 2, -1, -1, -1, -1, 0, 9, -1, -1, 11, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		-1, -1, -1, -1, 0, -1, 1, 12, 3, -1, -1, -1, -1, 5, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, 6, -1, 10,
+		2, 3, 1, 4, -1, 0, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1,
+		5, 1, 3, 0, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, 9, -1, -1, 6, -1, 7,
+	},
+	successors_reversed = {
+		's', 't', 'c', 'l', 'm', 'a', 'd', 'r', 'v', 'T', 'A', 'L', 'e', 'M', 'Y', '-',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'-', 't', 'a', 'b', 's', 'h', 'c', 'r', 'n', 'w', 'p', 'm', 'l', 'd', 'i', 'f',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'u', 'e', 'i', 'a', 'o', 'r', 'y', 'l', 'I', 'E', 'R', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'a', 'o', 'i', 'u', 'A', 'y', 'E', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		't', 'n', 'f', 's', '\'', 'm', 'I', 'N', 'A', 'E', 'L', 'Z', 'r', 'V', 'R', 'C',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'o', 'a', 'y', 'i', 'u', 'e', 'I', 'L', 'D', '\'', 'E', 'Y', '\x00', '\x00', '\x00', '\x00',
+		'r', 'i', 'y', 'a', 'e', 'o', 'u', 'Y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'h', 'o', 'e', 'E', 'i', 'u', 'r', 'w', 'a', 'H', 'y', 'R', 'Z', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'h', 'i', 'e', 'a', 'o', 'r', 'I', 'y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'n', 't', 's', 'r', 'l', 'd', 'i', 'y', 'v', 'm', 'b', 'c', 'g', 'p', 'k', 'u',
+		'e', 'l', 'o', 'u', 'y', 'a', 'r', 'i', 's', 'j', 't', 'b', 'v', 'h', 'm', 'd',
+		'o', 'e', 'h', 'a', 't', 'k', 'i', 'r', 'l', 'u', 'y', 'c', 'q', 's', '-', 'd',
+		'e', 'i', 'o', 'a', 's', 'y', 'r', 'u', 'd', 'l', '-', 'g', 'n', 'v', 'm', 'f',
+		'r', 'n', 'd', 's', 'a', 'l', 't', 'e', 'm', 'c', 'v', 'y', 'i', 'x', 'f', 'p',
+		'o', 'e', 'r', 'a', 'i', 'f', 'u', 't', 'l', '-', 'y', 's', 'n', 'c', '\'', 'k',
+		'h', 'e', 'o', 'a', 'r', 'i', 'l', 's', 'u', 'n', 'g', 'b', '-', 't', 'y', 'm',
+		'e', 'a', 'i', 'o', 't', 'r', 'u', 'y', 'm', 's', 'l', 'b', '\'', '-', 'f', 'd',
+		'n', 's', 't', 'm', 'o', 'l', 'c', 'd', 'r', 'e', 'g', 'a', 'f', 'v', 'z', 'b',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'n', 'i', 's', 'h', 'l', 'f', 'y', '-', 'a', 'w', '\'', 'g', 'r', 'o', 't',
+		'e', 'l', 'i', 'y', 'd', 'o', 'a', 'f', 'u', 't', 's', 'k', 'w', 'v', 'm', 'p',
+		'e', 'a', 'o', 'i', 'u', 'p', 'y', 's', 'b', 'm', 'f', '\'', 'n', '-', 'l', 't',
+		'd', 'g', 'e', 't', 'o', 'c', 's', 'i', 'a', 'n', 'y', 'l', 'k', '\'', 'f', 'v',
+		'u', 'n', 'r', 'f', 'm', 't', 'w', 'o', 's', 'l', 'v', 'd', 'p', 'k', 'i', 'c',
+		'e', 'r', 'a', 'o', 'l', 'p', 'i', 't', 'u', 's', 'h', 'y', 'b', '-', '\'', 'm',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'i', 'o', 'a', 's', 'y', 't', 'd', 'r', 'n', 'c', 'm', 'l', 'u', 'g', 'f',
+		'e', 't', 'h', 'i', 'o', 's', 'a', 'u', 'p', 'c', 'l', 'w', 'm', 'k', 'f', 'y',
+		'h', 'o', 'e', 'i', 'a', 't', 'r', 'u', 'y', 'l', 's', 'w', 'c', 'f', '\'', '-',
+		'r', 't', 'l', 's', 'n', 'g', 'c', 'p', 'e', 'i', 'a', 'd', 'm', 'b', 'f', 'o',
+		'e', 'i', 'a', 'o', 'y', 'u', 'r', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'a', 'i', 'h', 'e', 'o', 'n', 'r', 's', 'l', 'd', 'k', '-', 'f', '\'', 'c', 'b',
+		'p', 't', 'c', 'a', 'i', 'e', 'h', 'q', 'u', 'f', '-', 'y', 'o', '\x00', '\x00', '\x00',
+		'o', 'e', 's', 't', 'i', 'd', '\'', 'l', 'b', '-', 'm', 'a', 'r', 'n', 'p', 'w',
+	},
+
+	character_count = 32,
+	successor_count = 16,
+
+	max_successor_n = 7,
+	packs = {
+		{ 0x80000000, 1, 2, { 26, 24, 24, 24, 24, 24, 24, 24 }, { 15,  3,  0,  0, 0, 0, 0, 0 }, 0xc0, 0x80 },
+		{ 0xc0000000, 2, 4, { 25, 22, 19, 16, 16, 16, 16, 16 }, { 15,  7,  7,  7, 0, 0, 0, 0 }, 0xe0, 0xc0 },
+		{ 0xe0000000, 4, 8, { 23, 19, 15, 11,  8,  5,  2,  0 }, { 31, 15, 15, 15, 7, 7, 7, 3 }, 0xf0, 0xe0 },
+	},
+}

+ 318 - 0
core/compress/shoco/shoco.odin

@@ -0,0 +1,318 @@
+/*
+	Copyright 2022 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+
+	An implementation of [shoco](https://github.com/Ed-von-Schleck/shoco) by Christian Schramm.
+*/
+
+// package shoco is an implementation of the shoco short string compressor
+package shoco
+
+import "core:intrinsics"
+import "core:compress"
+
+Shoco_Pack :: struct {
+	word:           u32,
+	bytes_packed:   i8,
+	bytes_unpacked: i8,
+	offsets:        [8]u16,
+	masks:          [8]i16,
+	header_mask:    u8,
+	header:         u8,
+}
+
+Shoco_Model :: struct {
+	min_char:             u8,
+	max_char:             u8,
+	characters_by_id:     []u8,
+	ids_by_character:     [256]i16,
+	successors_by_bigram: []i8,
+	successors_reversed:  []u8,
+
+	character_count:      u8,
+	successor_count:      u8,
+	max_successor_n:      i8,
+	packs:                []Shoco_Pack,
+}
+
+compress_bound :: proc(uncompressed_size: int) -> (worst_case_compressed_size: int) {
+	// Worst case compression happens when input is non-ASCII (128-255)
+	// Encoded as 0x00 + the byte in question.
+	return uncompressed_size * 2
+}
+
+decompress_bound :: proc(compressed_size: int, model := DEFAULT_MODEL) -> (maximum_decompressed_size: int) {
+	// Best case compression is 2:1
+	most: f64
+	for pack in model.packs {
+		val := f64(compressed_size) / f64(pack.bytes_packed) * f64(pack.bytes_unpacked)
+		most = max(most, val)
+	}
+	return int(most)
+}
+
+find_best_encoding :: proc(indices: []i16, n_consecutive: i8, model := DEFAULT_MODEL) -> (res: int) {
+	for p := len(model.packs); p > 0; p -= 1 {
+		pack := model.packs[p - 1]
+		if n_consecutive >= pack.bytes_unpacked {
+			have_index := true
+			for i := 0; i < int(pack.bytes_unpacked); i += 1 {
+				if indices[i] > pack.masks[i] {
+					have_index = false
+					break
+				}
+			}
+			if have_index {
+				return p - 1
+			}
+		}
+	}
+	return -1
+}
+
+validate_model :: proc(model: Shoco_Model) -> (int, compress.Error) {
+	if len(model.characters_by_id) != int(model.character_count) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	if len(model.successors_by_bigram) != int(model.character_count) * int(model.character_count) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	if len(model.successors_reversed) != int(model.successor_count) * int(model.max_char - model.min_char) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	// Model seems legit.
+	return 0, nil
+}
+
+// Decompresses into provided buffer.
+decompress_slice_to_output_buffer :: proc(input: []u8, output: []u8, model := DEFAULT_MODEL) -> (size: int, err: compress.Error) {
+	inp, inp_end := 0, len(input)
+	out, out_end := 0, len(output)
+
+	validate_model(model) or_return
+
+	for inp < inp_end {
+		val  := transmute(i8)input[inp]
+		mark := int(-1)
+
+		for val < 0 {
+			val <<= 1
+			mark += 1
+		}
+
+		if mark >= len(model.packs) {
+			return out, .Unknown_Compression_Method
+		}
+
+		if mark < 0 {
+			if out >= out_end {
+				return out, .Output_Too_Short
+			}
+
+			// Ignore the sentinel value for non-ASCII chars
+			if input[inp] == 0x00 {
+				inp += 1
+				if inp >= inp_end {
+					return out, .Stream_Too_Short
+				}
+			}
+			output[out] = input[inp]
+			inp, out = inp + 1, out + 1
+
+		} else {
+			pack := model.packs[mark]
+
+			if out + int(pack.bytes_unpacked) > out_end {
+				return out, .Output_Too_Short
+			} else if inp + int(pack.bytes_packed) > inp_end {
+				return out, .Stream_Too_Short
+			}
+
+			code := intrinsics.unaligned_load((^u32)(&input[inp]))
+			when ODIN_ENDIAN == .Little {
+				code = intrinsics.byte_swap(code)
+			}
+
+			// Unpack the leading char
+			offset := pack.offsets[0]
+			mask   := pack.masks[0]
+
+			last_chr := model.characters_by_id[(code >> offset) & u32(mask)]
+			output[out] = last_chr
+
+			// Unpack the successor chars
+			for i := 1; i < int(pack.bytes_unpacked); i += 1 {
+				offset = pack.offsets[i]
+				mask   = pack.masks[i]
+
+				index_major := u32(last_chr - model.min_char) * u32(model.successor_count)
+				index_minor := (code >> offset) & u32(mask)
+
+				last_chr = model.successors_reversed[index_major + index_minor]
+
+				output[out + i] = last_chr
+			}
+
+			out += int(pack.bytes_unpacked)
+			inp += int(pack.bytes_packed)
+		}
+	}
+
+	return out, nil
+}
+
+decompress_slice_to_string :: proc(input: []u8, model := DEFAULT_MODEL, allocator := context.allocator) -> (res: string, err: compress.Error) {
+	context.allocator = allocator
+
+	if len(input) == 0 {
+		return "", .Stream_Too_Short
+	}
+
+	max_output_size := decompress_bound(len(input), model)
+
+	buf: [dynamic]u8
+	if !resize(&buf, max_output_size) {
+		return "", .Out_Of_Memory
+	}
+
+	length, result := decompress_slice_to_output_buffer(input, buf[:], model)
+	resize(&buf, length)
+	return string(buf[:]), result
+}
+decompress :: proc{decompress_slice_to_output_buffer, decompress_slice_to_string}
+
+compress_string_to_buffer :: proc(input: string, output: []u8, model := DEFAULT_MODEL, allocator := context.allocator) -> (size: int, err: compress.Error) {
+	inp, inp_end := 0, len(input)
+	out, out_end := 0, len(output)
+	output := output
+
+	validate_model(model) or_return
+
+	indices := make([]i16, model.max_successor_n + 1)
+	defer delete(indices)
+
+	last_resort := false
+
+	encode: for inp < inp_end {
+		if last_resort {
+			last_resort = false
+
+			if input[inp] & 0x80 == 0x80 {
+				// Non-ASCII case
+				if out + 2 > out_end {
+					return out, .Output_Too_Short
+				}
+
+				// Put in a sentinel byte
+				output[out] = 0x00
+				out += 1
+			} else {
+				// An ASCII byte
+				if out + 1 > out_end {
+					return out, .Output_Too_Short
+				}
+			}
+			output[out] = input[inp]
+			out, inp = out + 1, inp + 1
+		} else {
+			// Find the longest string of known successors
+			indices[0] = model.ids_by_character[input[inp]]
+			last_chr_index := indices[0]
+
+			if last_chr_index < 0 {
+				last_resort = true
+				continue encode
+			}
+
+			rest := inp_end - inp
+			n_consecutive: i8 = 1
+			for ; n_consecutive <= model.max_successor_n; n_consecutive += 1 {
+				if inp_end > 0 && int(n_consecutive) == rest {
+					break
+				}
+
+				current_index := model.ids_by_character[input[inp + int(n_consecutive)]]
+				if current_index < 0 { // '\0' is always -1
+					break
+				}
+
+				successor_index := model.successors_by_bigram[last_chr_index * i16(model.character_count) + current_index]
+				if successor_index < 0 {
+					break
+				}
+
+				indices[n_consecutive] = i16(successor_index)
+				last_chr_index = current_index
+			}
+
+			if n_consecutive < 2 {
+				last_resort = true
+				continue encode
+			}
+
+			pack_n := find_best_encoding(indices, n_consecutive)
+			if pack_n >= 0 {
+				if out + int(model.packs[pack_n].bytes_packed) > out_end {
+					return out, .Output_Too_Short
+				}
+
+				pack := model.packs[pack_n]
+				code := pack.word
+
+				for i := 0; i < int(pack.bytes_unpacked); i += 1 {
+					code |= u32(indices[i]) << pack.offsets[i]
+				}
+
+				// In the little-endian world, we need to swap what's in the register to match the memory representation.
+				when ODIN_ENDIAN == .Little {
+					code = intrinsics.byte_swap(code)
+				}
+				out_ptr := raw_data(output[out:])
+
+				switch pack.bytes_packed {
+				case 4:
+					intrinsics.unaligned_store(transmute(^u32)out_ptr, code)
+				case 2:
+					intrinsics.unaligned_store(transmute(^u16)out_ptr, u16(code))
+				case 1:
+					intrinsics.unaligned_store(transmute(^u8)out_ptr,  u8(code))
+				case:
+					return out, .Unknown_Compression_Method
+				}
+
+				out += int(pack.bytes_packed)
+				inp += int(pack.bytes_unpacked)
+			} else {
+				last_resort = true
+				continue encode
+			}
+		}
+	}
+	return out, nil
+}
+
+compress_string :: proc(input: string, model := DEFAULT_MODEL, allocator := context.allocator) -> (output: []u8, err: compress.Error) {
+	context.allocator = allocator
+
+	if len(input) == 0 {
+		return {}, .Stream_Too_Short
+	}
+
+	max_output_size := compress_bound(len(input))
+
+	buf: [dynamic]u8
+	if !resize(&buf, max_output_size) {
+		return {}, .Out_Of_Memory
+	}
+
+	length, result := compress_string_to_buffer(input, buf[:], model)
+	resize(&buf, length)
+	return buf[:length], result
+}
+compress :: proc{compress_string_to_buffer, compress_string}
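
A round-trip sketch for the new package, assuming it is imported as `core:compress/shoco`:

package example

import "core:compress/shoco"

main :: proc() {
	packed, c_err := shoco.compress_string("hello world")
	if c_err != nil { return }
	defer delete(packed)

	text, d_err := shoco.decompress_slice_to_string(packed)
	if d_err != nil { return }
	defer delete(text)
	// text now equals "hello world"
}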

+ 43 - 40
core/compress/zlib/zlib.odin

@@ -47,10 +47,10 @@ Options :: struct {
 	level: u8,
 }

-Error     :: compress.Error
-E_General :: compress.General_Error
-E_ZLIB    :: compress.ZLIB_Error
-E_Deflate :: compress.Deflate_Error
+Error         :: compress.Error
+General_Error :: compress.General_Error
+ZLIB_Error    :: compress.ZLIB_Error
+Deflate_Error :: compress.Deflate_Error

 DEFLATE_MAX_CHUNK_SIZE   :: 65535
 DEFLATE_MAX_LITERAL_SIZE :: 65535
@@ -111,9 +111,9 @@ ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)
 */
 Huffman_Table :: struct {
 	fast:        [1 << ZFAST_BITS]u16,
-	firstcode:   [16]u16,
+	firstcode:   [17]u16,
 	maxcode:     [17]int,
-	firstsymbol: [16]u16,
+	firstsymbol: [17]u16,
 	size:        [288]u8,
 	value:       [288]u16,
 }
@@ -244,7 +244,7 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T
 @(optimization_mode="speed")
 build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	sizes:     [HUFFMAN_MAX_BITS+1]int
-	next_code: [HUFFMAN_MAX_BITS]int
+	next_code: [HUFFMAN_MAX_BITS+1]int
 
 
 	k := int(0)
 	k := int(0)
 
 
@@ -256,21 +256,21 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	}
 	}
 	sizes[0] = 0
 	sizes[0] = 0
 
 
-	for i in 1..<(HUFFMAN_MAX_BITS+1) {
+	for i in 1 ..< HUFFMAN_MAX_BITS {
 		if sizes[i] > (1 << uint(i)) {
 		if sizes[i] > (1 << uint(i)) {
-			return E_Deflate.Huffman_Bad_Sizes
+			return .Huffman_Bad_Sizes
 		}
 		}
 	}
 	}
 	code := int(0)
 	code := int(0)
 
 
-	for i in 1..<HUFFMAN_MAX_BITS {
+	for i in 1 ..= HUFFMAN_MAX_BITS {
 		next_code[i]     = code
 		next_code[i]     = code
 		z.firstcode[i]   = u16(code)
 		z.firstcode[i]   = u16(code)
 		z.firstsymbol[i] = u16(k)
 		z.firstsymbol[i] = u16(k)
 		code = code + sizes[i]
 		code = code + sizes[i]
 		if sizes[i] != 0 {
 		if sizes[i] != 0 {
 			if code - 1 >= (1 << u16(i)) {
 			if code - 1 >= (1 << u16(i)) {
-				return E_Deflate.Huffman_Bad_Code_Lengths
+				return .Huffman_Bad_Code_Lengths
 			}
 			}
 		}
 		}
 		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
 		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
@@ -314,15 +314,15 @@ decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Erro
 		s += 1
 		s += 1
 	}
 	}
 	if s >= 16 {
 	if s >= 16 {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 	// code size is s, so:
 	// code size is s, so:
 	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
 	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
 	if b >= size_of(t.size) {
 	if b >= size_of(t.size) {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 	if t.size[b] != s {
 	if t.size[b] != s {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 
 
 	compress.consume_bits_lsb(z, s)
 	compress.consume_bits_lsb(z, s)
@@ -335,11 +335,11 @@ decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Erro
 decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 	if z.num_bits < 16 {
 	if z.num_bits < 16 {
 		if z.num_bits > 63 {
 		if z.num_bits > 63 {
-			return 0, E_ZLIB.Code_Buffer_Malformed
+			return 0, .Code_Buffer_Malformed
 		}
 		}
 		compress.refill_lsb(z)
 		compress.refill_lsb(z)
 		if z.num_bits > 63 {
 		if z.num_bits > 63 {
-			return 0, E_General.Stream_Too_Short
+			return 0, .Stream_Too_Short
 		}
 		}
 	}
 	}
 	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
 	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
@@ -361,7 +361,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 		if value < 256 {
 		if value < 256 {
 			e := write_byte(z, u8(value))
 			e := write_byte(z, u8(value))
 			if e != .None {
 			if e != .None {
-				return E_General.Output_Too_Short
+				return .Output_Too_Short
 			}
 			}
 		} else {
 		} else {
 			if value == 256 {
 			if value == 256 {
@@ -377,7 +377,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 
 
 			value, e = decode_huffman(z, z_offset)
 			value, e = decode_huffman(z, z_offset)
 			if e != nil {
 			if e != nil {
-				return E_Deflate.Bad_Huffman_Code
+				return .Bad_Huffman_Code
 			}
 			}
 
 
 			distance := Z_DIST_BASE[value]
 			distance := Z_DIST_BASE[value]
@@ -387,7 +387,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 
 
 			if z.bytes_written < i64(distance) {
 			if z.bytes_written < i64(distance) {
 				// Distance is longer than we've decoded so far.
 				// Distance is longer than we've decoded so far.
-				return E_Deflate.Bad_Distance
+				return .Bad_Distance
 			}
 			}
 
 
 			/*
 			/*
@@ -405,14 +405,14 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 					c := z.output.buf[z.bytes_written - i64(distance)]
 					c := z.output.buf[z.bytes_written - i64(distance)]
 					e := repl_byte(z, length, c)
 					e := repl_byte(z, length, c)
 					if e != .None {
 					if e != .None {
-						return E_General.Output_Too_Short
+						return .Output_Too_Short
 					}
 					}
 				}
 				}
 			} else {
 			} else {
 				if length > 0 {
 				if length > 0 {
 					e := repl_bytes(z, length, distance)
 					e := repl_bytes(z, length, distance)
 					if e != .None {
 					if e != .None {
-						return E_General.Output_Too_Short
+						return .Output_Too_Short
 					}
 					}
 				}
 				}
 			}
 			}
@@ -432,25 +432,25 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 	if !raw {
 	if !raw {
 		size, size_err := compress.input_size(ctx)
 		size, size_err := compress.input_size(ctx)
 		if size < 6 || size_err != nil {
 		if size < 6 || size_err != nil {
-			return E_General.Stream_Too_Short
+			return .Stream_Too_Short
 		}
 		}
 
 
 		cmf, _ := compress.read_u8(ctx)
 		cmf, _ := compress.read_u8(ctx)
 
 
 		method := Compression_Method(cmf & 0xf)
 		method := Compression_Method(cmf & 0xf)
 		if method != .DEFLATE {
 		if method != .DEFLATE {
-			return E_General.Unknown_Compression_Method
+			return .Unknown_Compression_Method
 		}
 		}
 
 
 		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
 		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
-			return E_ZLIB.Unsupported_Window_Size
+			return .Unsupported_Window_Size
 		}
 		}
 		flg, _ := compress.read_u8(ctx)
 		flg, _ := compress.read_u8(ctx)
 
 
 		fcheck := flg & 0x1f
 		fcheck := flg & 0x1f
 		fcheck_computed := (cmf << 8 | flg) & 0x1f
 		fcheck_computed := (cmf << 8 | flg) & 0x1f
 		if fcheck != fcheck_computed {
 		if fcheck != fcheck_computed {
-			return E_General.Checksum_Failed
+			return .Checksum_Failed
 		}
 		}
 
 
 		/*
 		/*
@@ -458,7 +458,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 			They're application specific and PNG doesn't use them.
 			They're application specific and PNG doesn't use them.
 		*/
 		*/
 		if fdict := (flg >> 5) & 1; fdict != 0 {
 		if fdict := (flg >> 5) & 1; fdict != 0 {
-			return E_ZLIB.FDICT_Unsupported
+			return .FDICT_Unsupported
 		}
 		}
 
 
 		// flevel  := Compression_Level((flg >> 6) & 3);
 		// flevel  := Compression_Level((flg >> 6) & 3);
@@ -485,7 +485,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 		output_hash := hash.adler32(ctx.output.buf[:])
 		output_hash := hash.adler32(ctx.output.buf[:])
 
 
 		if output_hash != u32(adler) {
 		if output_hash != u32(adler) {
-			return E_General.Checksum_Failed
+			return .Checksum_Failed
 		}
 		}
 	}
 	}
 	return nil
 	return nil
@@ -538,23 +538,24 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 		final = compress.read_bits_lsb(z, 1)
 		final = compress.read_bits_lsb(z, 1)
 		type  = compress.read_bits_lsb(z, 2)
 		type  = compress.read_bits_lsb(z, 2)
 
 
-		// fmt.printf("Final: %v | Type: %v\n", final, type);
+		// fmt.printf("Final: %v | Type: %v\n", final, type)
 
 
 		switch type {
 		switch type {
 		case 0:
 		case 0:
+			// fmt.printf("Method 0: STORED\n")
 			// Uncompressed block
 			// Uncompressed block
 
 
 			// Discard bits until next byte boundary
 			// Discard bits until next byte boundary
 			compress.discard_to_next_byte_lsb(z)
 			compress.discard_to_next_byte_lsb(z)
 
 
-			uncompressed_len := i16(compress.read_bits_lsb(z, 16))
-			length_check     := i16(compress.read_bits_lsb(z, 16))
+			uncompressed_len := u16(compress.read_bits_lsb(z, 16))
+			length_check     := u16(compress.read_bits_lsb(z, 16))
 
 
-			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);
+			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check)
 
 
 
 
 			if ~uncompressed_len != length_check {
 			if ~uncompressed_len != length_check {
-				return E_Deflate.Len_Nlen_Mismatch
+				return .Len_Nlen_Mismatch
 			}
 			}
 
 
 			/*
 			/*
@@ -567,10 +568,12 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 				write_byte(z, u8(lit))
 				write_byte(z, u8(lit))
 				uncompressed_len -= 1
 				uncompressed_len -= 1
 			}
 			}
+			assert(uncompressed_len == 0)
+
 		case 3:
 		case 3:
-			return E_Deflate.BType_3
+			return .BType_3
 		case:
 		case:
-			// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
+			// fmt.printf("Err: %v | Final: %v | Type: %v\n", err, final, type)
 			if type == 1 {
 			if type == 1 {
 				// Use fixed code lengths.
 				// Use fixed code lengths.
 				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
 				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
@@ -601,7 +604,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 					c = decode_huffman(z, codelength_ht) or_return
 					c = decode_huffman(z, codelength_ht) or_return
 
 
 					if c < 0 || c >= 19 {
 					if c < 0 || c >= 19 {
-						return E_Deflate.Huffman_Bad_Code_Lengths
+						return .Huffman_Bad_Code_Lengths
 					}
 					}
 					if c < 16 {
 					if c < 16 {
 						lencodes[n] = u8(c)
 						lencodes[n] = u8(c)
@@ -613,7 +616,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 						case 16:
 						case 16:
 							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
 							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
 							if n == 0 {
 							if n == 0 {
-								return E_Deflate.Huffman_Bad_Code_Lengths
+								return .Huffman_Bad_Code_Lengths
 							}
 							}
 							fill = lencodes[n - 1]
 							fill = lencodes[n - 1]
 						case 17:
 						case 17:
@@ -621,11 +624,11 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 						case 18:
 						case 18:
 							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
 							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
 						case:
 						case:
-								return E_Deflate.Huffman_Bad_Code_Lengths
+								return .Huffman_Bad_Code_Lengths
 						}
 						}
 
 
 						if ntot - n < u32(c) {
 						if ntot - n < u32(c) {
-							return E_Deflate.Huffman_Bad_Code_Lengths
+							return .Huffman_Bad_Code_Lengths
 						}
 						}
 
 
 						nc := n + u32(c)
 						nc := n + u32(c)
@@ -636,7 +639,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 				}
 				}
 
 
 				if n != ntot {
 				if n != ntot {
-					return E_Deflate.Huffman_Bad_Code_Lengths
+					return .Huffman_Bad_Code_Lengths
 				}
 				}
 
 
 				build_huffman(z_repeat, lencodes[:hlit])     or_return
 				build_huffman(z_repeat, lencodes[:hlit])     or_return
@@ -674,4 +677,4 @@ inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := fals
 	return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
 	return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
 }
 }
 
 
-inflate :: proc{inflate_from_context, inflate_from_byte_array};
+inflate :: proc{inflate_from_context, inflate_from_byte_array}
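
A short sketch of the renamed API in use, assuming `inflate_from_byte_array` shares the signature of `inflate_from_byte_array_raw` shown above, so the `inflate` overload resolves on a `[]u8` input and a `^bytes.Buffer` output:

	package main

	import "core:bytes"
	import "core:fmt"
	import "core:compress/zlib"

	main :: proc() {
		// "hello", deflated and wrapped in a zlib header and adler32 checksum.
		stream := []u8{0x78, 0x9c, 0xcb, 0x48, 0xcd, 0xc9, 0xc9, 0x07, 0x00, 0x06, 0x2c, 0x02, 0x15}

		out: bytes.Buffer
		defer bytes.buffer_destroy(&out)

		if err := zlib.inflate(stream, &out); err != nil {
			fmt.println("inflate failed:", err)
			return
		}
		fmt.println(string(out.buf[:])) // hello
	}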

+ 0 - 216
core/container/array.odin

@@ -1,216 +0,0 @@
-package container
-
-import "core:mem"
-import "core:runtime"
-
-Array :: struct($T: typeid) {
-	data:      ^T,
-	len:       int,
-	cap:       int,
-	allocator: mem.Allocator,
-}
-
-ARRAY_DEFAULT_CAPACITY :: 16
-
-/*
-array_init :: proc {
-	array_init_none,
-	array_init_len,
-	array_init_len_cap,
-}
-array_init
-array_delete
-array_len
-array_cap
-array_space
-array_slice
-array_get
-array_get_ptr
-array_set
-array_reserve
-array_resize
-array_push = array_append :: proc{
-	array_push_back,
-	array_push_back_elems,
-}
-array_push_front
-array_pop_back
-array_pop_front
-array_consume
-array_trim
-array_clear
-array_clone
-array_set_capacity
-array_grow
-*/
-
-
-array_init_none :: proc(a: ^$A/Array, allocator := context.allocator) {
-	array_init_len_cap(a, 0, ARRAY_DEFAULT_CAPACITY, allocator)
-}
-array_init_len :: proc(a: ^$A/Array, len: int, allocator := context.allocator) {
-	array_init_len_cap(a, len, len, allocator)
-}
-array_init_len_cap :: proc(a: ^$A/Array($T), len: int, cap: int, allocator := context.allocator) {
-	a.allocator = allocator
-	a.data = (^T)(mem.alloc(size_of(T)*cap, align_of(T), a.allocator))
-	a.len = len
-	a.cap = cap
-}
-
-array_init :: proc{array_init_none, array_init_len, array_init_len_cap}
-
-array_delete :: proc(a: $A/Array) {
-	mem.free(a.data, a.allocator)
-}
-
-array_len :: proc(a: $A/Array) -> int {
-	return a.len
-}
-
-array_cap :: proc(a: $A/Array) -> int {
-	return a.cap
-}
-
-array_space :: proc(a: $A/Array) -> int {
-	return a.cap - a.len
-}
-
-array_slice :: proc(a: $A/Array($T)) -> []T {
-	s := mem.Raw_Slice{a.data, a.len}
-	return transmute([]T)s
-}
-
-array_cap_slice :: proc(a: $A/Array($T)) -> []T {
-	s := mem.Raw_Slice{a.data, a.cap}
-	return transmute([]T)s
-}
-
-array_get :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> T {
-	runtime.bounds_check_error_loc(loc, index, array_len(a))
-	return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^
-}
-array_get_ptr :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> ^T {
-	runtime.bounds_check_error_loc(loc, index, array_len(a))
-	return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))
-}
-
-array_set :: proc(a: ^$A/Array($T), index: int, item: T, loc := #caller_location)  {
-	runtime.bounds_check_error_loc(loc, index, array_len(a^))
-	(^T)(uintptr(a.data) + size_of(T)*uintptr(index))^ = item
-}
-
-
-array_reserve :: proc(a: ^$A/Array, capacity: int) {
-	if capacity > a.len {
-		array_set_capacity(a, capacity)
-	}
-}
-
-array_resize :: proc(a: ^$A/Array, length: int) {
-	if length > a.len {
-		array_set_capacity(a, length)
-	}
-	a.len = length
-}
-
-
-
-array_push_back :: proc(a: ^$A/Array($T), item: T) {
-	if array_space(a^) == 0 {
-		array_grow(a)
-	}
-
-	a.len += 1
-	array_set(a, a.len-1, item)
-}
-
-array_push_front :: proc(a: ^$A/Array($T), item: T) {
-	if array_space(a^) == 0 {
-		array_grow(a)
-	}
-
-	a.len += 1
-	data := array_slice(a^)
-	copy(data[1:], data[:])
-	data[0] = item
-}
-
-array_pop_back :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := array_get(a^, a.len-1)
-	a.len -= 1
-	return item
-}
-
-array_pop_front :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := array_get(a^, 0)
-	s := array_slice(a^)
-	copy(s[:], s[1:])
-	a.len -= 1
-	return item
-}
-
-
-array_consume :: proc(a: ^$A/Array($T), count: int, loc := #caller_location) {
-	assert(condition=a.len >= count, loc=loc)
-	a.len -= count
-}
-
-
-array_trim :: proc(a: ^$A/Array($T)) {
-	array_set_capacity(a, a.len)
-}
-
-array_clear :: proc(a: ^$A/Array($T)) {
-	array_resize(a, 0)
-}
-
-array_clone :: proc(a: $A/Array($T), allocator := context.allocator) -> A {
-	res: A
-	array_init(&res, array_len(a), array_len(a), allocator)
-	copy(array_slice(res), array_slice(a))
-	return res
-}
-
-array_push_back_elems :: proc(a: ^$A/Array($T), items: ..T) {
-	if array_space(a^) < len(items) {
-		array_grow(a, a.len + len(items))
-	}
-	offset := a.len
-	data := array_cap_slice(a^)
-	n := copy(data[a.len:], items)
-	a.len += n
-}
-
-array_push   :: proc{array_push_back, array_push_back_elems}
-array_append :: proc{array_push_back, array_push_back_elems}
-
-array_set_capacity :: proc(a: ^$A/Array($T), new_capacity: int) {
-	if new_capacity == a.cap {
-		return
-	}
-
-	if new_capacity < a.len {
-		array_resize(a, new_capacity)
-	}
-
-	new_data: ^T
-	if new_capacity > 0 {
-		if a.allocator.procedure == nil {
-			a.allocator = context.allocator
-		}
-		new_data = (^T)(mem.alloc(size_of(T)*new_capacity, align_of(T), a.allocator))
-		if new_data != nil {
-			mem.copy(new_data, a.data, size_of(T)*a.len)
-		}
-	}
-	mem.free(a.data, a.allocator)
-	a.data = new_data
-	a.cap = new_capacity
-}
-array_grow :: proc(a: ^$A/Array, min_capacity: int = 0) {
-	new_capacity := max(array_len(a^)*2 + 8, min_capacity)
-	array_set_capacity(a, new_capacity)
-}

+ 266 - 0
core/container/bit_array/bit_array.odin

@@ -0,0 +1,266 @@
+package dynamic_bit_array
+
+import "core:intrinsics"
+import "core:mem"
+
+/*
+	Note that these constants are dependent on the backing being a u64.
+*/
+@(private="file")
+INDEX_SHIFT :: 6
+
+@(private="file")
+INDEX_MASK  :: 63
+
+@(private="file")
+NUM_BITS :: 64
+
+Bit_Array :: struct {
+	bits:         [dynamic]u64,
+	bias:         int,
+	max_index:    int,
+	free_pointer: bool,
+}
+
+Bit_Array_Iterator :: struct {
+	array:    ^Bit_Array,
+	word_idx: int,
+	bit_idx:  uint,
+}
+
+/*
+	In:
+		- ba:   ^Bit_Array - the array to iterate over
+
+	Out:
+		- it:   Bit_Array_Iterator - the iterator that holds iteration state
+*/
+make_iterator :: proc (ba: ^Bit_Array) -> (it: Bit_Array_Iterator) {
+	return Bit_Array_Iterator { array = ba }
+}
+
+/*
+	In:
+		- it:    ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- set:    bool - the state of the bit at `index`
+		- index:  int - the next bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more bits
+*/
+iterate_by_all :: proc (it: ^Bit_Array_Iterator) -> (set: bool, index: int, ok: bool) {
+	index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+	if index > it.array.max_index { return false, 0, false }
+
+	word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+	set = (word >> it.bit_idx & 1) == 1
+
+	it.bit_idx += 1
+	if it.bit_idx >= NUM_BITS {
+		it.bit_idx = 0
+		it.word_idx += 1
+	}
+
+	return set, index, true
+}
+
+/*
+	In:
+		- it:     ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- index:  int - the next set bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more bits set
+*/
+iterate_by_set :: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+	return iterate_internal_(it, true)
+}
+
+/*
+	In:
+		- it:	  ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- index:  int - the next unset bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more unset bits
+*/
+iterate_by_unset :: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+	return iterate_internal_(it, false)
+}
+
+@(private="file")
+iterate_internal_ :: proc (it: ^Bit_Array_Iterator, $ITERATE_SET_BITS: bool) -> (index: int, ok: bool) {
+	word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+	when ! ITERATE_SET_BITS { word = ~word }
+
+	// If the word is empty or we have already gone over all the bits in it,
+	// it.bit_idx is greater than the index of any set bit in the word,
+	// meaning that word >> it.bit_idx == 0.
+	for it.word_idx < len(it.array.bits) && word >> it.bit_idx == 0 {
+		it.word_idx += 1
+		it.bit_idx = 0
+		word = it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+		when ! ITERATE_SET_BITS { word = ~word }
+	}
+
+	// if we are iterating the set bits, reaching the end of the array means we have no more bits to check
+	when ITERATE_SET_BITS {
+		if it.word_idx >= len(it.array.bits) {
+			return 0, false
+		}
+	}
+
+	// reaching here means that the word has some set bits
+	it.bit_idx += uint(intrinsics.count_trailing_zeros(word >> it.bit_idx))
+	index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+
+	it.bit_idx += 1
+	if it.bit_idx >= NUM_BITS {
+		it.bit_idx = 0
+		it.word_idx += 1
+	}
+	return index, index <= it.array.max_index
+}
+
+
+/*
+	In:
+		- ba:    ^Bit_Array - a pointer to the Bit Array
+		- index: The bit index. Can be an enum member.
+
+	Out:
+		- res:   The bit you're interested in.
+		- ok:    Whether the index was valid. Returns `false` if the index is smaller than the bias.
+
+	The `ok` return value may be ignored.
+*/
+get :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (res: bool, ok: bool) {
+	if ba == nil || int(index) < ba.bias { return false, false } // check `ba` before dereferencing it
+	context.allocator = allocator
+
+	idx := int(index) - ba.bias
+
+	leg_index := idx >> INDEX_SHIFT
+	bit_index := idx &  INDEX_MASK
+
+	/*
+		If we `get` a bit that doesn't fit in the Bit Array, it's naturally `false`.
+		This early-out prevents unnecessary resizing.
+	*/
+	if leg_index + 1 > len(ba.bits) { return false, true }
+
+	val := u64(1 << uint(bit_index))
+	res = ba.bits[leg_index] & val == val
+
+	return res, true
+}
+
+/*
+	In:
+		- ba:    ^Bit_Array - a pointer to the Bit Array
+		- index: The bit index. Can be an enum member.
+
+	Out:
+		- ok:    Whether or not we managed to set requested bit.
+
+	`set` automatically resizes the Bit Array to accommodate the requested index if needed.
+*/
+set :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (ok: bool) {
+	if ba == nil || int(index) < ba.bias { return false } // check `ba` before dereferencing it
+	context.allocator = allocator
+
+	idx := int(index) - ba.bias
+
+	leg_index := idx >> INDEX_SHIFT
+	bit_index := idx &  INDEX_MASK
+
+	resize_if_needed(ba, leg_index) or_return
+
+	ba.max_index = max(idx, ba.max_index)
+	ba.bits[leg_index] |= 1 << uint(bit_index)
+	return true
+}
+
+/*
+	In:
+		- ba:    ^Bit_Array - a pointer to the Bit Array
+		- index: The bit index. Can be an enum member.
+
+	Out:
+		- ok:    Whether or not we managed to unset requested bit.
+
+	`unset` automatically resizes the Bit Array to accommodate the requested index if needed.
+*/
+unset :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (ok: bool) {
+	if ba == nil || int(index) < ba.bias { return false } // check `ba` before dereferencing it
+	context.allocator = allocator
+
+	idx := int(index) - ba.bias
+
+	leg_index := idx >> INDEX_SHIFT
+	bit_index := idx &  INDEX_MASK
+
+	resize_if_needed(ba, leg_index) or_return
+
+	ba.max_index = max(idx, ba.max_index)
+	ba.bits[leg_index] &= ~(1 << uint(bit_index))
+	return true
+}
+
+/*
+	A helper function to create a Bit Array with optional bias, in case your smallest index is non-zero (including negative).
+*/
+create :: proc(max_index: int, min_index := 0, allocator := context.allocator) -> (res: ^Bit_Array, ok: bool) #optional_ok {
+	context.allocator = allocator
+	size_in_bits := max_index - min_index
+
+	if size_in_bits < 1 { return {}, false }
+
+	legs := size_in_bits >> INDEX_SHIFT
+
+	res = new(Bit_Array)
+	res.bias         = min_index
+	res.max_index    = max_index
+	res.free_pointer = true
+	return res, resize_if_needed(res, legs)
+}
+
+/*
+	Sets all bits to `false`.
+*/
+clear :: proc(ba: ^Bit_Array) {
+	if ba == nil { return }
+	mem.zero_slice(ba.bits[:])
+}
+
+/*
+	Releases the memory used by the Bit Array.
+*/
+destroy :: proc(ba: ^Bit_Array) {
+	if ba == nil { return }
+	delete(ba.bits)
+	if ba.free_pointer { // Only free if this Bit_Array was created using `create`, not when on the stack.
+		free(ba)
+	}
+}
+
+/*
+	Resizes the Bit Array. For internal use.
+	If you want to reserve the memory for a given-sized Bit Array up front, you can use `create`.
+*/
+@(private="file")
+resize_if_needed :: proc(ba: ^Bit_Array, legs: int, allocator := context.allocator) -> (ok: bool) {
+	if ba == nil { return false }
+
+	context.allocator = allocator
+
+	if legs + 1 > len(ba.bits) {
+		resize(&ba.bits, legs + 1)
+	}
+	return len(ba.bits) > legs
+}
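
A quick sketch of the iterator API above; `iterate_by_set` returns `(index, ok)`, so it plugs straight into Odin's `for ... in`:

	package main

	import "core:fmt"
	import "core:container/bit_array"

	main :: proc() {
		ba: bit_array.Bit_Array // the zero value is usable; `set` grows it on demand
		defer bit_array.destroy(&ba)

		bit_array.set(&ba, 3)
		bit_array.set(&ba, 200)

		it := bit_array.make_iterator(&ba)
		for index in bit_array.iterate_by_set(&it) {
			fmt.println("bit set at", index) // 3, then 200
		}
	}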

+ 53 - 0
core/container/bit_array/doc.odin

@@ -0,0 +1,53 @@
+package dynamic_bit_array
+
+/*
+	The Bit Array can be used in several ways:
+
+	-- By default you don't need to instantiate a Bit Array:
+
+		package test
+
+		import "core:fmt"
+		import "core:container/bit_array"
+
+		main :: proc() {
+			using bit_array
+
+			bits: Bit_Array
+
+			// returns `true`
+			fmt.println(set(&bits, 42))
+
+			// returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+			was_set, was_retrieved := get(&bits, -1)
+			fmt.println(was_set, was_retrieved) 
+			destroy(&bits)
+		}
+
+	-- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
+
+		package test
+
+		import "core:fmt"
+		import "core:container/bit_array"
+
+		main :: proc() {
+			Foo :: enum int {
+				Negative_Test = -42,
+				Bar           = 420,
+				Leaves        = 69105,
+			}
+
+			using bit_array
+
+			bits := create(int(max(Foo)), int(min(Foo)))
+			defer destroy(bits)
+
+			fmt.printf("Set(Bar):           %v\n",     set(bits, Foo.Bar))
+			fmt.printf("Get(Bar):           %v, %v\n", get(bits, Foo.Bar))
+			fmt.printf("Set(Negative_Test): %v\n",     set(bits, Foo.Negative_Test))
+			fmt.printf("Get(Leaves):        %v, %v\n", get(bits, Foo.Leaves))
+			fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
+			fmt.printf("Freed.\n")
+		}
+*/

+ 0 - 80
core/container/bloom_filter.odin

@@ -1,80 +0,0 @@
-package container
-
-import "core:mem"
-
-Bloom_Hash_Proc :: #type proc(data: []byte) -> u32
-
-Bloom_Hash :: struct {
-	hash_proc: Bloom_Hash_Proc,
-	next:     ^Bloom_Hash,
-}
-
-Bloom_Filter :: struct {
-	allocator: mem.Allocator,
-	hash:      ^Bloom_Hash,
-	bits:      []byte,
-}
-
-bloom_filter_init :: proc(b: ^Bloom_Filter, size: int, allocator := context.allocator) {
-	b.allocator = allocator
-	b.bits = make([]byte, size, allocator)
-}
-
-bloom_filter_destroy :: proc(b: ^Bloom_Filter) {
-	context.allocator = b.allocator
-	delete(b.bits)
-	for b.hash != nil {
-		hash := b.hash
-		b.hash = b.hash.next
-		free(hash)
-	}
-}
-
-bloom_filter_add_hash_proc :: proc(b: ^Bloom_Filter, hash_proc: Bloom_Hash_Proc) {
-	context.allocator = b.allocator
-	h := new(Bloom_Hash)
-	h.hash_proc = hash_proc
-
-	head := &b.hash
-	for head^ != nil {
-		head = &(head^.next)
-	}
-	head^ = h
-}
-
-bloom_filter_add :: proc(b: ^Bloom_Filter, item: []byte) {
-	#no_bounds_check for h := b.hash; h != nil; h = h.next {
-		hash := h.hash_proc(item)
-		hash %= u32(len(b.bits) * 8)
-		b.bits[hash >> 3] |= 1 << (hash & 3)
-	}
-}
-
-bloom_filter_add_string :: proc(b: ^Bloom_Filter, item: string) {
-	bloom_filter_add(b, transmute([]byte)item)
-}
-
-bloom_filter_add_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) {
-	item := mem.slice_ptr((^byte)(data), size)
-	bloom_filter_add(b, item)
-}
-
-bloom_filter_test :: proc(b: ^Bloom_Filter, item: []byte) -> bool {
-	#no_bounds_check for h := b.hash; h != nil; h = h.next {
-		hash := h.hash_proc(item)
-		hash %= u32(len(b.bits) * 8)
-		if (b.bits[hash >> 3] & (1 << (hash & 3)) == 0) {
-			return false
-		}
-	}
-	return true
-}
-
-bloom_filter_test_string :: proc(b: ^Bloom_Filter, item: string) -> bool {
-	return bloom_filter_test(b, transmute([]byte)item)
-}
-
-bloom_filter_test_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) -> bool {
-	item := mem.slice_ptr((^byte)(data), size)
-	return bloom_filter_test(b, item)
-}

+ 173 - 0
core/container/intrusive/list/intrusive_list.odin

@@ -0,0 +1,173 @@
+package container_intrusive_list
+
+import "core:intrinsics"
+
+// An intrusive doubly-linked list
+//
+// As this is an intrusive container, a `Node` must be embedded in your own
+// structure, which is conventionally called a "link". `push_front` and
+// `push_back` take the address of this node. Retrieving the data associated
+// with the node requires finding the relative offset of the node within the
+// parent structure. The parent type and field name are given to the
+// `iterator_*` procedures, or to the built-in `container_of` procedure.
+//
+// This data structure is two-pointers in size:
+// 	8 bytes on 32-bit platforms and 16 bytes on 64-bit platforms
+List :: struct {
+	head: ^Node,
+	tail: ^Node,
+}
+
+
+Node :: struct {
+	next, prev: ^Node,
+}
+
+push_front :: proc(list: ^List, node: ^Node) {
+	if list.head != nil {
+		list.head.prev = node
+		node.prev, node.next = nil, list.head
+		list.head = node
+	} else {
+		list.head, list.tail = node, node
+		node.prev, node.next = nil, nil
+	}
+}
+
+push_back :: proc(list: ^List, node: ^Node) {
+	if list.tail != nil {
+		list.tail.next = node
+		node.prev, node.next = list.tail, nil
+		list.tail = node
+	} else {
+		list.head, list.tail = node, node
+		node.prev, node.next = nil, nil
+	}
+}
+
+remove :: proc(list: ^List, node: ^Node) {
+	if node != nil {
+		if node.next != nil {
+			node.next.prev = node.prev
+		}
+		if node.prev != nil {
+			node.prev.next = node.next
+		}
+		if list.head == node {
+			list.head = node.next
+		}
+		if list.tail == node {
+			list.tail = node.prev
+		}
+	}
+}
+
+remove_by_proc :: proc(list: ^List, to_erase: proc(^Node) -> bool) {
+	for node := list.head; node != nil; {
+		next := node.next
+		if to_erase(node) {
+			if node.next != nil {
+				node.next.prev = node.prev
+			}
+			if node.prev != nil {
+				node.prev.next = node.next
+			}
+			if list.head == node {
+				list.head = node.next
+			}
+			if list.tail == node {
+				list.tail = node.prev
+			}
+		}
+		node = next
+	}
+}
+
+
+is_empty :: proc(list: ^List) -> bool {
+	return list.head == nil
+}
+
+pop_front :: proc(list: ^List) -> ^Node {
+	link := list.head
+	if link == nil {
+		return nil
+	}
+	if link.next != nil {
+		link.next.prev = link.prev
+	}
+	if link.prev != nil {
+		link.prev.next = link.next
+	}
+	if link == list.head {
+		list.head = link.next
+	}
+	if link == list.tail {
+		list.tail = link.prev
+	}
+	return link
+
+}
+pop_back :: proc(list: ^List) -> ^Node {
+	link := list.tail
+	if link == nil {
+		return nil
+	}
+	if link.next != nil {
+		link.next.prev = link.prev
+	}
+	if link.prev != nil {
+		link.prev.next = link.next
+	}
+	if link == list.head {
+		list.head = link.next
+	}
+	if link == list.tail {
+		list.tail = link.prev
+	}
+	return link
+}
+
+
+Iterator :: struct($T: typeid) {
+	curr:   ^Node,
+	offset: uintptr,
+}
+
+iterator_head :: proc(list: List, $T: typeid, $field_name: string) -> Iterator(T)
+	where intrinsics.type_has_field(T, field_name),
+	      intrinsics.type_field_type(T, field_name) == Node {
+	return {list.head, offset_of_by_string(T, field_name)}
+}
+
+iterator_tail :: proc(list: List, $T: typeid, $field_name: string) -> Iterator(T)
+	where intrinsics.type_has_field(T, field_name),
+	      intrinsics.type_field_type(T, field_name) == Node {
+	return {list.tail, offset_of_by_string(T, field_name)}
+}
+
+iterator_from_node :: proc(node: ^Node, $T: typeid, $field_name: string) -> Iterator(T)
+	where intrinsics.type_has_field(T, field_name),
+	      intrinsics.type_field_type(T, field_name) == Node {
+	return {node, offset_of_by_string(T, field_name)}
+}
+
+iterate_next :: proc(it: ^Iterator($T)) -> (ptr: ^T, ok: bool) {
+	node := it.curr
+	if node == nil {
+		return nil, false
+	}
+	it.curr = node.next
+
+	return (^T)(uintptr(node) - it.offset), true
+}
+
+iterate_prev :: proc(it: ^Iterator($T)) -> (ptr: ^T, ok: bool) {
+	node := it.curr
+	if node == nil {
+		return nil, false
+	}
+	it.curr = node.prev
+
+	return (^T)(uintptr(node) - it.offset), true
+}
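
A minimal sketch of the intrusive pattern described in the header comment: the `Node` is embedded as a field, and `iterator_head` recovers the parent struct from the field name:

	package main

	import "core:fmt"
	import "core:container/intrusive/list"

	Entity :: struct {
		name: string,
		link: list.Node, // the embedded "link"
	}

	main :: proc() {
		l: list.List
		a := Entity{name = "a"}
		b := Entity{name = "b"}
		list.push_back(&l, &a.link)
		list.push_back(&l, &b.link)

		it := list.iterator_head(l, Entity, "link")
		for e in list.iterate_next(&it) {
			fmt.println(e.name) // "a", then "b"
		}
	}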

+ 201 - 0
core/container/lru/lru_cache.odin

@@ -0,0 +1,201 @@
+package container_lru
+
+import "core:runtime"
+import "core:intrinsics"
+_ :: runtime
+_ :: intrinsics
+
+Node :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+	prev, next: ^Node(Key, Value),
+	key:   Key,
+	value: Value,
+}
+
+// Cache is an LRU cache. It automatically removes entries as new entries are
+// added if the capacity is reached. Entries are removed based on how recently
+// they were used where the oldest entries are removed first.
+Cache :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+	head: ^Node(Key, Value),
+	tail: ^Node(Key, Value),
+
+	entries: map[Key]^Node(Key, Value),
+
+	count:    int,
+	capacity: int,
+
+	node_allocator: runtime.Allocator,
+
+	on_remove: proc(key: Key, value: Value, user_data: rawptr),
+	on_remove_user_data: rawptr,
+}
+
+// init initializes a Cache
+init :: proc(c: ^$C/Cache($Key, $Value), capacity: int, entries_allocator := context.allocator, node_allocator := context.allocator) {
+	c.entries.allocator = entries_allocator
+	c.node_allocator = node_allocator
+	c.capacity = capacity
+}
+
+// destroy deinitializes a Cache
+destroy :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+	clear(c, call_on_remove)
+	delete(c.entries)
+}
+
+// clear the contents of a Cache
+clear :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+	for _, node in c.entries {
+		if call_on_remove {
+			_call_on_remove(c, node)
+		}
+		free(node, c.node_allocator)
+	}
+	runtime.clear(&c.entries)
+	c.head = nil
+	c.tail = nil
+	c.count = 0
+}
+
+// set the given key value pair. This operation updates the recent usage of the item.
+set :: proc(c: ^$C/Cache($Key, $Value), key: Key, value: Value) -> runtime.Allocator_Error {
+	if e, ok := c.entries[key]; ok {
+		e.value = value
+		_pop_node(c, e)
+		_push_front_node(c, e)
+		return nil
+	}
+
+	e: ^Node(Key, Value)
+	assert(c.count <= c.capacity)
+	if c.count == c.capacity {
+		// At capacity: recycle the least recently used node rather than allocating.
+		e = c.tail
+		_remove_node(c, e)
+	} else { // `else` must share a line with the closing brace in Odin
+		c.count += 1
+		e = new(Node(Key, Value), c.node_allocator) or_return
+	}
+
+	e.key = key
+	e.value = value
+	_push_front_node(c, e)
+	c.entries[key] = e
+
+	return nil
+}
+
+// get a value from the cache from a given key. This operation updates the usage of the item.
+get :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	_pop_node(c, e)
+	_push_front_node(c, e)
+	return e.value, true
+}
+
+// get_ptr gets the pointer to a value the cache from a given key. This operation updates the usage of the item.
+get_ptr :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: ^Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	_pop_node(c, e)
+	_push_front_node(c, e)
+	return &e.value, true
+}
+
+// peek gets the value from the cache from a given key without updating the recent usage.
+peek :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	return e.value, true
+}
+
+// exists checks for the existence of a value from a given key without updating the recent usage.
+exists :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+	return key in c.entries
+}
+
+// remove removes an item from the cache.
+remove :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+	e, ok := c.entries[key]
+	if !ok {
+		return false
+	}
+	_remove_node(c, e)
+	free(e, c.node_allocator)
+	c.count -= 1
+	return true
+}
+
+
+@(private)
+_remove_node :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+	if c.head == node {
+		c.head = node.next
+	}
+	if c.tail == node {
+		c.tail = node.prev
+	}
+	if node.prev != nil {
+		node.prev.next = node.next
+	}
+	if node.next != nil {
+		node.next.prev = node.prev
+	}
+	node.prev = nil
+	node.next = nil
+
+	delete_key(&c.entries, node.key)
+
+	_call_on_remove(c, node)
+}
+
+@(private)
+_call_on_remove :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+	if c.on_remove != nil {
+		c.on_remove(node.key, node.value, c.on_remove_user_data)
+	}
+}
+
+@(private)
+_push_front_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+	if c.head != nil {
+		e.next = c.head
+		e.next.prev = e
+	}
+	c.head = e
+	if c.tail == nil {
+		c.tail = e
+	}
+	e.prev = nil
+}
+
+@(private)
+_pop_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+	if e == nil {
+		return
+	}
+	if c.head == e {
+		c.head = e.next
+	}
+	if c.tail == e {
+		c.tail = e.prev
+	}
+	if e.prev != nil {
+		e.prev.next = e.next
+	}
+
+	if e.next != nil {
+		e.next.prev = e.prev
+	}
+	e.prev = nil
+	e.next = nil
+}
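
A small sketch of the eviction behaviour: with a capacity of two, touching "a" via `get` makes "b" the least recently used entry, so the next `set` evicts it:

	package main

	import "core:fmt"
	import "core:container/lru"

	main :: proc() {
		cache: lru.Cache(string, int)
		lru.init(&cache, 2)
		defer lru.destroy(&cache, false)

		lru.set(&cache, "a", 1)
		lru.set(&cache, "b", 2)
		lru.get(&cache, "a")    // "a" is now the most recently used entry
		lru.set(&cache, "c", 3) // evicts "b", the oldest entry

		fmt.println(lru.exists(&cache, "b")) // false
		fmt.println(lru.exists(&cache, "a")) // true
	}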

+ 0 - 377
core/container/map.odin

@@ -1,377 +0,0 @@
-package container
-
-import "core:intrinsics"
-_ :: intrinsics
-
-
-Map :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
-	hash: Array(int),
-	entries: Array(Map_Entry(Key, Value)),
-}
-
-Map_Entry :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
-	hash:  uintptr,
-	next:  int,
-	key:   Key,
-	value: Value,
-}
-
-
-/*
-map_init :: proc{
-	map_init_none,
-	map_init_cap,
-}
-map_delete
-
-map_has
-map_get
-map_get_default
-map_get_ptr
-map_set
-map_remove
-map_reserve
-map_clear
-
-// Multi Map
-
-multi_map_find_first
-multi_map_find_next
-multi_map_count
-multi_map_get :: proc{
-	multi_map_get_array,
-	multi_map_get_slice,
-};
-multi_map_get_as_slice
-multi_map_insert
-multi_map_remove
-multi_map_remove_all
-
-*/
-
-map_init :: proc{map_init_none, map_init_cap}
-
-map_init_none :: proc(m: ^$M/Map($Key, $Value), allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-}
-
-map_init_cap :: proc(m: ^$M/Map($Key, $Value), cap: int, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-	map_reserve(m, cap)
-}
-
-map_delete :: proc(m: $M/Map($Key, $Value)) {
-	array_delete(m.hash)
-	array_delete(m.entries)
-}
-
-
-map_has :: proc(m: $M/Map($Key, $Value), key: Key) -> bool {
-	return _map_find_or_fail(m, key) >= 0
-}
-
-map_get :: proc(m: $M/Map($Key, $Value), key: Key) -> (res: Value, ok: bool) #optional_ok {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return {}, false
-	}
-	return array_get(m.entries, i).value, true
-}
-
-map_get_default :: proc(m: $M/Map($Key, $Value), key: Key, default: Value) -> (res: Value, ok: bool) #optional_ok {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return default, false
-	}
-	return array_get(m.entries, i).value, true
-}
-
-map_get_ptr :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Value {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return nil
-	}
-	return array_get_ptr(m.entries, i).value
-}
-
-map_set :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
-	if array_len(m.hash) == 0 {
-		_map_grow(m)
-	}
-
-	i := _map_find_or_make(m, key)
-	array_get_ptr(m.entries, i).value = value
-	if _map_full(m^) {
-		_map_grow(m)
-	}
-}
-
-map_remove :: proc(m: ^$M/Map($Key, $Value), key: Key) {
-	fr := _map_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		_map_erase(m, fr)
-	}
-}
-
-
-map_reserve :: proc(m: ^$M/Map($Key, $Value), new_size: int) {
-	nm: M
-	map_init(&nm, m.hash.allocator)
-	array_resize(&nm.hash, new_size)
-	array_reserve(&nm.entries, array_len(m.entries))
-
-	for i in 0..<new_size {
-		array_set(&nm.hash, i, -1)
-	}
-	for i in 0..<array_len(m.entries) {
-		e := array_get(m.entries, i)
-		multi_map_insert(&nm, e.key, e.value)
-	}
-
-	map_delete(m^)
-	m^ = nm
-}
-
-map_clear :: proc(m: ^$M/Map($Key, $Value)) {
-	array_clear(&m.hash)
-	array_clear(&m.entries)
-}
-
-
-
-multi_map_find_first :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Map_Entry(Key, Value) {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return nil
-	}
-	return array_get_ptr(m.entries, i)
-}
-
-multi_map_find_next :: proc(m: $M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> ^Map_Entry(Key, Value) {
-	i := e.next
-	for i >= 0 {
-		it := array_get_ptr(m.entries, i)
-		if it.hash == e.hash && it.key == e.key {
-			return it
-		}
-		i = it.next
-	}
-	return nil
-}
-
-multi_map_count :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
-	n := 0
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		n += 1
-		e = multi_map_find_next(m, e)
-	}
-	return n
-}
-
-multi_map_get :: proc{multi_map_get_array, multi_map_get_slice}
-
-multi_map_get_array :: proc(m: $M/Map($Key, $Value), key: Key, items: ^Array(Value)) {
-	if items == nil {
-		return
-	}
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		array_append(items, e.value)
-		e = multi_map_find_next(m, e)
-	}
-}
-
-multi_map_get_slice :: proc(m: $M/Map($Key, $Value), key: Key, items: []Value) {
-	e := multi_map_find_first(m, key)
-	i := 0
-	for e != nil && i < len(items) {
-		items[i] = e.value
-		i += 1
-		e = multi_map_find_next(m, e)
-	}
-}
-
-multi_map_get_as_slice :: proc(m: $M/Map($Key, $Value), key: Key) -> []Value {
-	items: Array(Value)
-	array_init(&items, 0)
-
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		array_append(&items, e.value)
-		e = multi_map_find_next(m, e)
-	}
-
-	return array_slice(items)
-}
-
-
-multi_map_insert :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
-	if array_len(m.hash) == 0 {
-		_map_grow(m)
-	}
-
-	i := _map_make(m, key)
-	array_get_ptr(m.entries, i).value = value
-	if _map_full(m^) {
-		_map_grow(m)
-	}
-}
-
-multi_map_remove :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) {
-	fr := _map_find_entry(m, e)
-	if fr.entry_index >= 0 {
-		_map_erase(m, fr)
-	}
-}
-
-multi_map_remove_all :: proc(m: ^$M/Map($Key, $Value), key: Key) {
-	for map_exist(m^, key) {
-		map_remove(m, key)
-	}
-}
-
-
-/// Internal
-
-
-Map_Find_Result :: struct {
-	hash_index:  int,
-	entry_prev:  int,
-	entry_index: int,
-}
-
-_map_add_entry :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int where intrinsics.type_is_valid_map_key(Key) {
-	hasher := intrinsics.type_hasher_proc(Key)
-
-	e: Map_Entry(Key, Value)
-	e.key = key
-	e.hash = hasher(&e.key, 0)
-	e.next = -1
-	idx := array_len(m.entries)
-	array_push(&m.entries, e)
-	return idx
-}
-
-_map_erase :: proc(m: ^$M/Map, fr: Map_Find_Result) {
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
-	}
-
-	if fr.entry_index == array_len(m.entries)-1 {
-		array_pop_back(&m.entries)
-		return
-	}
-
-	array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
-	last := _map_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
-	if last.entry_prev < 0 {
-		array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
-	} else {
-		array_set(&m.hash, last.hash_index, fr.entry_index)
-	}
-}
-
-
-_map_find_key :: proc(m: $M/Map($Key, $Value), key: Key) -> Map_Find_Result where intrinsics.type_is_valid_map_key(Key) {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	hasher := intrinsics.type_hasher_proc(Key)
-
-	key := key
-	hash := hasher(&key, 0)
-
-	fr.hash_index = int(hash % uintptr(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it.hash == hash && it.key == key {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_map_find_entry :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(e.hash % uintptr(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it == e {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_map_find_or_fail :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
-	return _map_find_key(m, key).entry_index
-}
-_map_find_or_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
-	fr := _map_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		return fr.entry_index
-	}
-
-	i := _map_add_entry(m, key)
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-	return i
-}
-
-
-_map_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
-	fr := _map_find_key(m^, key)
-	i := _map_add_entry(m, key)
-
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-
-	array_get_ptr(m.entries, i).next = fr.entry_index
-
-	return i
-}
-
-
-_map_full :: proc(m: $M/Map($Key, $Value)) -> bool {
-	// TODO(bill): Determine good max load factor
-	return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_map_grow :: proc(m: ^$M/Map($Key, $Value)) {
-	new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
-	map_reserve(m, new_size)
-}
-
-

+ 0 - 121
core/container/priority_queue.odin

@@ -1,121 +0,0 @@
-package container
-
-Priority_Queue :: struct($T: typeid) {
-	data: Array(T),
-	len: int,
-	priority: proc(item: T) -> int,
-}
-
-priority_queue_init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
-	queue_init_len(q, f, 0, allocator)
-}
-priority_queue_init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
-	queue_init_len_cap(q, f, 0, 16, allocator)
-}
-priority_queue_init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
-	array_init(&q.data, len, cap, allocator)
-	q.len = len
-	q.priority = f
-}
-
-priority_queue_init :: proc{priority_queue_init_none, priority_queue_init_len, priority_queue_init_len_cap}
-
-
-priority_queue_delete :: proc(q: $Q/Priority_Queue($T)) {
-	array_delete(q.data)
-}
-
-priority_queue_clear :: proc(q: ^$Q/Priority_Queue($T)) {
-	q.len = 0
-}
-
-priority_queue_len :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return q.len
-}
-
-priority_queue_cap :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return array_cap(q.data)
-}
-
-priority_queue_space :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return array_len(q.data) - q.len
-}
-
-priority_queue_reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
-	if capacity > q.len {
-		array_resize(&q.data, new_capacity)
-	}
-}
-
-priority_queue_resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
-	if length > q.len {
-		array_resize(&q.data, new_capacity)
-	}
-	q.len = length
-}
-
-_priority_queue_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 0) {
-	new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
-	array_resize(&q.data, new_capacity)
-}
-
-
-priority_queue_push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
-	if array_len(q.data) - q.len == 0 {
-		_priority_queue_grow(q)
-	}
-
-	s := array_slice(q.data)
-	s[q.len] = item
-
-	i := q.len
-	for i > 0 {
-		p := (i - 1) / 2
-		if q.priority(s[p]) <= q.priority(item) { 
-			break 
-		}
-		s[i] = s[p]
-		i = p
-	}
-
-	q.len += 1
-	if q.len > 0 { 
-		s[i] = item 
-	} 
-}
-
-
-
-priority_queue_pop :: proc(q: ^$Q/Priority_Queue($T)) -> T {
-	assert(q.len > 0)
-
-	s := array_slice(q.data)
-	min := s[0]
-	root := s[q.len-1]
-	q.len -= 1
-
-	i := 0
-	for i * 2 + 1 < q.len {
-		a := i * 2 + 1
-		b := i * 2 + 2
-		c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
-
-		if q.priority(s[c]) >= q.priority(root) {
-			break
-		}
-		s[i] = s[c]
-		i = c
-	}
-
-	if q.len > 0 {
-		s[i] = root
-	}
-	return min
-}
-
-priority_queue_peek :: proc(q: ^$Q/Priority_Queue($T)) -> T {
-	assert(q.len > 0)
-
-	s := array_slice(q.data)
-	return s[0]
-}

+ 143 - 0
core/container/priority_queue/priority_queue.odin

@@ -0,0 +1,143 @@
+package container_priority_queue
+
+import "core:builtin"
+
+Priority_Queue :: struct($T: typeid) {
+	queue: [dynamic]T,
+	
+	less:  proc(a, b: T) -> bool,
+	swap:  proc(q: []T, i, j: int),
+}
+
+DEFAULT_CAPACITY :: 16
+
+default_swap_proc :: proc($T: typeid) -> proc(q: []T, i, j: int) {
+	return proc(q: []T, i, j: int) {
+		q[i], q[j] = q[j], q[i]
+	}
+}
+
+init :: proc(pq: ^$Q/Priority_Queue($T), less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int), capacity := DEFAULT_CAPACITY, allocator := context.allocator) {
+	if pq.queue.allocator.procedure == nil {
+		pq.queue.allocator = allocator
+	}
+	reserve(pq, capacity)
+	pq.less = less
+	pq.swap = swap
+}
+
+init_from_dynamic_array :: proc(pq: ^$Q/Priority_Queue($T), queue: [dynamic]T, less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int)) {
+	pq.queue = queue
+	pq.less = less
+	pq.swap = swap
+	n := builtin.len(pq.queue)
+	for i := n/2 - 1; i >= 0; i -= 1 {
+		_shift_down(pq, i, n)
+	}
+}
+
+destroy :: proc(pq: ^$Q/Priority_Queue($T)) {
+	clear(pq)
+	delete(pq.queue)
+}
+
+reserve :: proc(pq: ^$Q/Priority_Queue($T), capacity: int) {
+	builtin.reserve(&pq.queue, capacity)
+}
+clear :: proc(pq: ^$Q/Priority_Queue($T)) {
+	builtin.clear(&pq.queue)
+}
+len :: proc(pq: $Q/Priority_Queue($T)) -> int {
+	return builtin.len(pq.queue)
+}
+cap :: proc(pq: $Q/Priority_Queue($T)) -> int {
+	return builtin.cap(pq.queue)
+}
+
+_shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
+	// O(log n)
+	if 0 > i0 || i0 > n {
+		return false
+	}
+	
+	i := i0
+	queue := pq.queue[:]
+	
+	for {
+		j1 := 2*i + 1
+		if j1 < 0 || j1 >= n {
+			break
+		}
+		j := j1
+		if j2 := j1+1; j2 < n && pq.less(queue[j2], queue[j1]) {
+			j = j2
+		}
+		if !pq.less(queue[j], queue[i]) {
+			break
+		}
+		
+		pq.swap(queue, i, j)
+		i = j
+	}
+	return i > i0
+}
+
+_shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
+	j := j
+	queue := pq.queue[:]
+	n := builtin.len(queue)
+	for 0 <= j {
+		i := (j-1)/2
+		if i == j || !pq.less(queue[j], queue[i]) {
+			break
+		}
+		pq.swap(queue, i, j)
+		j = i
+	}
+}
+
+// NOTE(bill): When an element at index 'i' has changed its value, this will fix
+// the heap ordering, using a shift-up and a shift-down pass as in a basic heapsort.
+fix :: proc(pq: ^$Q/Priority_Queue($T), i: int) {
+	if !_shift_down(pq, i, builtin.len(pq.queue)) {
+		_shift_up(pq, i)
+	}
+}
+
+push :: proc(pq: ^$Q/Priority_Queue($T), value: T) {
+	append(&pq.queue, value)
+	_shift_up(pq, builtin.len(pq.queue)-1)
+}
+
+pop :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T) {
+	assert(condition=builtin.len(pq.queue)>0, loc=loc)
+	
+	n := builtin.len(pq.queue)-1
+	pq.swap(pq.queue[:], 0, n)
+	_shift_down(pq, 0, n)
+	return builtin.pop(&pq.queue)
+}
+
+pop_safe :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T, ok: bool) {
+	if builtin.len(pq.queue) > 0 {
+		n := builtin.len(pq.queue)-1
+		pq.swap(pq.queue[:], 0, n)
+		_shift_down(pq, 0, n)
+		return builtin.pop_safe(&pq.queue)
+	}
+	return
+}
+
+remove :: proc(pq: ^$Q/Priority_Queue($T), i: int) -> (value: T, ok: bool) {
+	n := builtin.len(pq.queue)
+	if 0 <= i && i < n {
+		if i != n-1 {
+			// Move the element into the last slot, then restore the heap property.
+			pq.swap(pq.queue[:], i, n-1)
+			_shift_down(pq, i, n-1)
+			_shift_up(pq, i)
+		}
+		value, ok = builtin.pop_safe(&pq.queue)
+	}
+	return
+}
+
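
A minimal sketch of the new interface: the comparison and swap procedures are supplied at `init`, so the same container serves as a min- or max-heap depending on `less`:

	package main

	import "core:fmt"
	import pq "core:container/priority_queue"

	main :: proc() {
		q: pq.Priority_Queue(int)
		pq.init(&q, less = proc(a, b: int) -> bool { return a < b }, swap = pq.default_swap_proc(int))
		defer pq.destroy(&q)

		pq.push(&q, 3)
		pq.push(&q, 1)
		pq.push(&q, 2)

		for pq.len(q) > 0 {
			fmt.println(pq.pop(&q)) // 1, 2, 3 (a min-heap under this `less`)
		}
	}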

+ 0 - 175
core/container/queue.odin

@@ -1,175 +0,0 @@
-package container
-
-Queue :: struct($T: typeid) {
-	data: Array(T),
-	len: int,
-	offset: int,
-}
-
-/*
-queue_init :: proc{
-	queue_init_none,
-	queue_init_len,
-	queue_init_len_cap,
-}
-queue_delete
-queue_clear
-queue_len
-queue_cap
-queue_space
-queue_get
-queue_set
-queue_reserve
-queue_resize
-queue_push :: proc{
-	queue_push_back, 
-	queue_push_elems,
-};
-queue_push_front
-queue_pop_front
-queue_pop_back
-queue_consume
-*/
-
-queue_init_none :: proc(q: ^$Q/Queue($T), allocator := context.allocator) {
-	queue_init_len(q, 0, allocator)
-}
-queue_init_len :: proc(q: ^$Q/Queue($T), len: int, allocator := context.allocator) {
-	queue_init_len_cap(q, 0, 16, allocator)
-}
-queue_init_len_cap :: proc(q: ^$Q/Queue($T), len: int, cap: int, allocator := context.allocator) {
-	array_init(&q.data, len, cap, allocator)
-	q.len = len
-	q.offset = 0
-}
-
-queue_init :: proc{queue_init_none, queue_init_len, queue_init_len_cap}
-
-queue_delete :: proc(q: $Q/Queue($T)) {
-	array_delete(q.data)
-}
-
-queue_clear :: proc(q: ^$Q/Queue($T)) {
-	q.len = 0
-}
-
-queue_len :: proc(q: $Q/Queue($T)) -> int {
-	return q.len
-}
-
-queue_cap :: proc(q: $Q/Queue($T)) -> int {
-	return array_cap(q.data)
-}
-
-queue_space :: proc(q: $Q/Queue($T)) -> int {
-	return array_len(q.data) - q.len
-}
-
-queue_get :: proc(q: $Q/Queue($T), index: int) -> T {
-	i := (index + q.offset) % array_len(q.data)
-	data := array_slice(q.data)
-	return data[i]
-}
-
-queue_set :: proc(q: ^$Q/Queue($T), index: int, item: T)  {
-	i := (index + q.offset) % array_len(q.data)
-	data := array_slice(q.data)
-	data[i] = item
-}
-
-
-queue_reserve :: proc(q: ^$Q/Queue($T), capacity: int) {
-	if capacity > q.len {
-		_queue_increase_capacity(q, capacity)
-	}
-}
-
-queue_resize :: proc(q: ^$Q/Queue($T), length: int) {
-	if length > q.len {
-		_queue_increase_capacity(q, length)
-	}
-	q.len = length
-}
-
-queue_push_back :: proc(q: ^$Q/Queue($T), item: T) {
-	if queue_space(q^) == 0 {
-		_queue_grow(q)
-	}
-
-	queue_set(q, q.len, item)
-	q.len += 1
-}
-
-queue_push_front :: proc(q: ^$Q/Queue($T), item: T) {
-	if queue_space(q^) == 0 {
-		_queue_grow(q)
-	}
-
-	q.offset = (q.offset - 1 + array_len(q.data)) % array_len(q.data)
-	q.len += 1
-	queue_set(q, 0, item)
-}
-
-queue_pop_front :: proc(q: ^$Q/Queue($T)) -> T {
-	assert(q.len > 0)
-	item := queue_get(q^, 0)
-	q.offset = (q.offset + 1) % array_len(q.data)
-	q.len -= 1
-	if q.len == 0 {
-		q.offset = 0
-	}
-	return item
-}
-
-queue_pop_back :: proc(q: ^$Q/Queue($T)) -> T {
-	assert(q.len > 0)
-	item := queue_get(q^, q.len-1)
-	q.len -= 1
-	return item
-}
-
-queue_consume :: proc(q: ^$Q/Queue($T), count: int) {
-	q.offset = (q.offset + count) & array_len(q.data)
-	q.len -= count
-}
-
-
-queue_push_elems :: proc(q: ^$Q/Queue($T), items: ..T) {
-	if queue_space(q^) < len(items) {
-		_queue_grow(q, q.len + len(items))
-	}
-	size := array_len(q.data)
-	insert := (q.offset + q.len) % size
-
-	to_insert := len(items)
-	if insert + to_insert > size {
-		to_insert = size - insert
-	}
-
-	the_items := items[:]
-
-	data := array_slice(q.data)
-
-	q.len += copy(data[insert:][:to_insert], the_items)
-	the_items = the_items[to_insert:]
-	q.len += copy(data[:], the_items)
-}
-
-queue_push :: proc{queue_push_back, queue_push_elems}
-
-
-
-_queue_increase_capacity :: proc(q: ^$Q/Queue($T), new_capacity: int) {
-	end := array_len(q.data)
-	array_resize(&q.data, new_capacity)
-	if q.offset + q.len > end {
-		end_items := q.len + end
-		data := array_slice(q.data)
-		copy(data[new_capacity-end_items:][:end_items], data[q.offset:][:end_items])
-		q.offset += new_capacity - end
-	}
-}
-_queue_grow :: proc(q: ^$Q/Queue($T), min_capacity: int = 0) {
-	new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
-	_queue_increase_capacity(q, new_capacity)
-}

+ 219 - 0
core/container/queue/queue.odin

@@ -0,0 +1,219 @@
+package container_queue
+
+import "core:builtin"
+import "core:runtime"
+_ :: runtime
+
+// Dynamically resizable double-ended queue/ring-buffer
+Queue :: struct($T: typeid) {
+	data:   [dynamic]T,
+	len:    uint,
+	offset: uint,
+}
+
+DEFAULT_CAPACITY :: 16
+
+// Procedure to initialize a queue
+init :: proc(q: ^$Q/Queue($T), capacity := DEFAULT_CAPACITY, allocator := context.allocator) -> bool {
+	if q.data.allocator.procedure == nil {
+		q.data.allocator = allocator
+	}
+	clear(q)
+	return reserve(q, capacity)
+}
+
+// Procedure to initialize a queue from a fixed backing slice
+init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
+	clear(q)
+	q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
+		data = raw_data(backing),
+		len = builtin.len(backing),
+		cap = builtin.len(backing),
+		allocator = {procedure=runtime.nil_allocator_proc, data=nil},
+	}
+	return true
+}
+
+// Procedure to destroy a queue
+destroy :: proc(q: ^$Q/Queue($T)) {
+	delete(q.data)
+}
+
+// The length of the queue
+len :: proc(q: $Q/Queue($T)) -> int {
+	return int(q.len)
+}
+
+// The current capacity of the queue
+cap :: proc(q: $Q/Queue($T)) -> int {
+	return builtin.len(q.data)
+}
+
+// Remaining space in the queue (cap-len)
+space :: proc(q: $Q/Queue($T)) -> int {
+	return builtin.len(q.data) - int(q.len)
+}
+
+// Reserve enough space for at least the specified capacity
+reserve :: proc(q: ^$Q/Queue($T), capacity: int) -> bool {
+	if uint(capacity) > q.len {
+		return _grow(q, uint(capacity)) 
+	}
+	return true
+}
+
+
+get :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> T {
+	runtime.bounds_check_error_loc(loc, i, int(q.len))
+
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	return q.data[idx]
+}
+
+front :: proc(q: ^$Q/Queue($T)) -> T {
+	return q.data[q.offset]
+}
+
+back :: proc(q: ^$Q/Queue($T)) -> T {
+	// The last element lives at offset+len-1; calling this on an empty queue is invalid.
+	idx := (q.offset+uint(q.len)-1)%builtin.len(q.data)
+	return q.data[idx]
+}
+
+set :: proc(q: ^$Q/Queue($T), #any_int i: int, val: T, loc := #caller_location) {
+	runtime.bounds_check_error_loc(loc, i, int(q.len))
+	
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	q.data[idx] = val
+}
+get_ptr :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> ^T {
+	runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+	
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	return &q.data[idx]
+}
+
+// Push an element to the back of the queue
+push_back :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+	if space(q^) == 0 {
+		_grow(q) or_return
+	}
+	idx := (q.offset+uint(q.len))%builtin.len(q.data)
+	q.data[idx] = elem
+	q.len += 1
+	return true
+}
+
+// Push an element to the front of the queue
+push_front :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+	if space(q^) == 0 {
+		_grow(q) or_return
+	}	
+	q.offset = uint(q.offset - 1 + builtin.len(q.data)) % builtin.len(q.data)
+	q.len += 1
+	q.data[q.offset] = elem
+	return true
+}
+
+
+// Pop an element from the back of the queue
+pop_back :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+	assert(condition=q.len > 0, loc=loc)
+	q.len -= 1
+	idx := (q.offset+uint(q.len))%builtin.len(q.data)
+	elem = q.data[idx]
+	return
+}
+// Safely pop an element from the back of the queue
+pop_back_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+	if q.len > 0 {
+		q.len -= 1
+		idx := (q.offset+uint(q.len))%builtin.len(q.data)
+		elem = q.data[idx]
+		ok = true
+	}
+	return
+}
+
+// Pop an element from the front of the queue
+pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+	assert(condition=q.len > 0, loc=loc)
+	elem = q.data[q.offset]
+	q.offset = (q.offset+1)%builtin.len(q.data)
+	q.len -= 1
+	return
+}
+// Safely pop an element from the front of the queue
+pop_front_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+	if q.len > 0 {
+		elem = q.data[q.offset]
+		q.offset = (q.offset+1)%builtin.len(q.data)
+		q.len -= 1
+		ok = true
+	}
+	return
+}
+
+// Push multiple elements to the back of the queue
+push_back_elems :: proc(q: ^$Q/Queue($T), elems: ..T) -> bool {
+	n := uint(builtin.len(elems))
+	if space(q^) < int(n) {
+		_grow(q, q.len + n) or_return
+	}
+	
+	sz := uint(builtin.len(q.data))
+	insert_from := (q.offset + q.len) % sz
+	insert_to := n
+	if insert_from + insert_to > sz {
+		insert_to = sz - insert_from
+	}
+	copy(q.data[insert_from:], elems[:insert_to])
+	copy(q.data[:insert_from], elems[insert_to:])
+	q.len += n
+	return true
+}
+
+// Consume `n` elements from the front of the queue
+consume_front :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+	assert(condition=int(q.len) >= n, loc=loc)
+	if n > 0 {
+		nu := uint(n)
+		q.offset = (q.offset + nu) % builtin.len(q.data)
+		q.len -= nu	
+	}
+}
+
+// Consume `n` elements from the back of the queue
+consume_back :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+	assert(condition=int(q.len) >= n, loc=loc)
+	if n > 0 {
+		q.len -= uint(n)
+	}
+}
+
+
+
+append_elem  :: push_back
+append_elems :: push_back_elems
+push   :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
+
+
+// Clear the contents of the queue
+clear :: proc(q: ^$Q/Queue($T)) {
+	q.len = 0
+	q.offset = 0
+}
+
+
+// Internal growing procedure
+_grow :: proc(q: ^$Q/Queue($T), min_capacity: uint = 0) -> bool {
+	new_capacity := max(min_capacity, uint(8), uint(builtin.len(q.data))*2)
+	n := uint(builtin.len(q.data))
+	builtin.resize(&q.data, int(new_capacity)) or_return
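+	// If the live region wrapped past the old end of the buffer, move the
+	// tail run [offset, old capacity) to the end of the grown buffer so
+	// the ring stays contiguous.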
+	if q.offset + q.len > n {
+		diff := n - q.offset
+		copy(q.data[new_capacity-diff:], q.data[q.offset:][:diff])
+		q.offset += new_capacity - n
+	}
+	return true
+}
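
A minimal usage sketch of the new double-ended queue API (illustrative only, not part of this commit; the import is aliased explicitly since the package name differs from the directory name):

	import queue "core:container/queue"

	example :: proc() {
		q: queue.Queue(int)
		queue.init(&q)                   // capacity defaults to DEFAULT_CAPACITY (16)
		queue.push_back(&q, 2)
		queue.push_front(&q, 1)          // the queue now holds [1, 2]
		assert(queue.pop_front(&q) == 1)
		assert(queue.pop_back(&q) == 2)
		queue.destroy(&q)
	}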

+ 0 - 74
core/container/ring.odin

@@ -1,74 +0,0 @@
-package container
-
-
-Ring :: struct($T: typeid) {
-	next, prev: ^Ring(T),
-	value: T,
-}
-
-ring_init :: proc(r: ^$R/Ring) -> ^R {
-	r.prev, r.next = r, r
-	return r
-}
-
-ring_next :: proc(r: ^$R/Ring) -> ^R {
-	if r.next == nil {
-		return ring_init(r)
-	}
-	return r.next
-}
-ring_prev :: proc(r: ^$R/Ring) -> ^R {
-	if r.prev == nil {
-		return ring_init(r)
-	}
-	return r.prev
-}
-
-
-ring_move :: proc(r: ^$R/Ring, n: int) -> ^R {
-  r := r
-	if r.next == nil {
-		return ring_init(r)
-	}
-
-	switch {
-	case n < 0:
-		for _ in n..<0 {
-			r = r.prev
-		}
-	case n > 0:
-		for _ in 0..<n {
-			r = r.next
-		}
-	}
-	return r
-}
-
-ring_link :: proc(r, s: ^$R/Ring) -> ^R {
-	n := ring_next(r)
-	if s != nil {
-		p := ring_prev(s)
-		r.next = s
-		s.prev = r
-		n.prev = p
-		p.next = n
-	}
-	return n
-}
-ring_unlink :: proc(r: ^$R/Ring, n: int) -> ^R {
-	if n <= 0 {
-		return nil
-	}
-	return ring_link(r, ring_move(r, n+1))
-}
-ring_len :: proc(r: ^$R/Ring) -> int {
-	n := 0
-	if r != nil {
-		n = 1
-		for p := ring_next(r); p != r; p = p.next {
-			n += 1
-		}
-	}
-	return n
-}
-

+ 0 - 240
core/container/set.odin

@@ -1,240 +0,0 @@
-package container
-
-Set :: struct {
-	hash:    Array(int),
-	entries: Array(Set_Entry),
-}
-
-Set_Entry :: struct {
-	key:   u64,
-	next:  int,
-}
-
-
-/*
-set_init :: proc{
-	set_init_none,
-	set_init_cap,
-}
-set_delete
-
-set_in
-set_not_in
-set_add
-set_remove
-set_reserve
-set_clear
-*/
-
-set_init :: proc{set_init_none, set_init_cap}
-
-set_init_none :: proc(m: ^Set, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-}
-
-set_init_cap :: proc(m: ^Set, cap: int, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-	set_reserve(m, cap)
-}
-
-set_delete :: proc(m: Set) {
-	array_delete(m.hash)
-	array_delete(m.entries)
-}
-
-
-set_in :: proc(m: Set, key: u64) -> bool {
-	return _set_find_or_fail(m, key) >= 0
-}
-set_not_in :: proc(m: Set, key: u64) -> bool {
-	return _set_find_or_fail(m, key) < 0
-}
-
-set_add :: proc(m: ^Set, key: u64) {
-	if array_len(m.hash) == 0 {
-		_set_grow(m)
-	}
-
-	_ = _set_find_or_make(m, key)
-	if _set_full(m^) {
-		_set_grow(m)
-	}
-}
-
-set_remove :: proc(m: ^Set, key: u64) {
-	fr := _set_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		_set_erase(m, fr)
-	}
-}
-
-
-set_reserve :: proc(m: ^Set, new_size: int) {
-	nm: Set
-	set_init(&nm, m.hash.allocator)
-	array_resize(&nm.hash, new_size)
-	array_reserve(&nm.entries, array_len(m.entries))
-
-	for i in 0..<new_size {
-		array_set(&nm.hash, i, -1)
-	}
-	for i in 0..<array_len(m.entries) {
-		e := array_get(m.entries, i)
-		set_add(&nm, e.key)
-	}
-
-	set_delete(m^)
-	m^ = nm
-}
-
-set_clear :: proc(m: ^Set) {
-	array_clear(&m.hash)
-	array_clear(&m.entries)
-}
-
-
-set_equal :: proc(a, b: Set) -> bool {
-	a_entries := array_slice(a.entries)
-	b_entries := array_slice(b.entries)
-	if len(a_entries) != len(b_entries) {
-		return false
-	}
-	for e in a_entries {
-		if set_not_in(b, e.key) {
-			return false
-		}
-	}
-
-	return true
-}
-
-
-
-/// Internal
-
-_set_add_entry :: proc(m: ^Set, key: u64) -> int {
-	e: Set_Entry
-	e.key = key
-	e.next = -1
-	idx := array_len(m.entries)
-	array_push(&m.entries, e)
-	return idx
-}
-
-_set_erase :: proc(m: ^Set, fr: Map_Find_Result) {
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
-	}
-
-	if fr.entry_index == array_len(m.entries)-1 {
-		array_pop_back(&m.entries)
-		return
-	}
-
-	array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
-	last := _set_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
-	if last.entry_prev < 0 {
-		array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
-	} else {
-		array_set(&m.hash, last.hash_index, fr.entry_index)
-	}
-}
-
-
-_set_find_key :: proc(m: Set, key: u64) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(key % u64(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it.key == key {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_set_find_entry :: proc(m: ^Set, e: ^Set_Entry) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(e.key % u64(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it == e {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_set_find_or_fail :: proc(m: Set, key: u64) -> int {
-	return _set_find_key(m, key).entry_index
-}
-_set_find_or_make :: proc(m: ^Set, key: u64) -> int {
-	fr := _set_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		return fr.entry_index
-	}
-
-	i := _set_add_entry(m, key)
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-	return i
-}
-
-
-_set_make :: proc(m: ^Set, key: u64) -> int {
-	fr := _set_find_key(m^, key)
-	i := _set_add_entry(m, key)
-
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-
-	array_get_ptr(m.entries, i).next = fr.entry_index
-
-	return i
-}
-
-
-_set_full :: proc(m: Set) -> bool {
-	// TODO(bill): Determine good max load factor
-	return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_set_grow :: proc(m: ^Set) {
-	new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
-	set_reserve(m, new_size)
-}
-
-

+ 0 - 95
core/container/small_array.odin

@@ -1,95 +0,0 @@
-package container
-
-Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
-	data: [N]T,
-	len:  int,
-}
-
-
-small_array_len :: proc(a: $A/Small_Array) -> int {
-	return a.len
-}
-
-small_array_cap :: proc(a: $A/Small_Array) -> int {
-	return len(a.data)
-}
-
-small_array_space :: proc(a: $A/Small_Array) -> int {
-	return len(a.data) - a.len
-}
-
-small_array_slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
-	return a.data[:a.len]
-}
-
-
-small_array_get :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> T {
-	return a.data[index]
-}
-small_array_get_ptr :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> ^T {
-	return &a.data[index]
-}
-
-small_array_set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T, loc := #caller_location) {
-	a.data[index] = item
-}
-
-small_array_resize :: proc(a: ^$A/Small_Array, length: int) {
-	a.len = min(length, len(a.data))
-}
-
-
-small_array_push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
-	if a.len < len(a.data) {
-		a.len += 1
-		a.data[a.len-1] = item
-		return true
-	}
-	return false
-}
-
-small_array_push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
-	if a.len < len(a.data) {
-		a.len += 1
-		data := small_array_slice(a)
-		copy(data[1:], data[:])
-		data[0] = item
-		return true
-	}
-	return false
-}
-
-small_array_pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := a.data[a.len-1]
-	a.len -= 1
-	return item
-}
-
-small_array_pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := a.data[0]
-	s := small_array_slice(a)
-	copy(s[:], s[1:])
-	a.len -= 1
-	return item
-}
-
-
-small_array_consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
-	assert(condition=a.len >= count, loc=loc)
-	a.len -= count
-}
-
-small_array_clear :: proc(a: ^$A/Small_Array($N, $T)) {
-	small_array_resize(a, 0)
-}
-
-small_array_push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
-	n := copy(a.data[a.len:], items[:])
-	a.len += n
-}
-
-small_array_push   :: proc{small_array_push_back, small_array_push_back_elems}
-small_array_append :: proc{small_array_push_back, small_array_push_back_elems}
-

+ 117 - 0
core/container/small_array/small_array.odin

@@ -0,0 +1,117 @@
+package container_small_array
+
+import "core:builtin"
+
+Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
+	data: [N]T,
+	len:  int,
+}
+
+
+len :: proc(a: $A/Small_Array) -> int {
+	return a.len
+}
+
+cap :: proc(a: $A/Small_Array) -> int {
+	return builtin.len(a.data)
+}
+
+space :: proc(a: $A/Small_Array) -> int {
+	return builtin.len(a.data) - a.len
+}
+
+slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
+	return a.data[:a.len]
+}
+
+
+get :: proc(a: $A/Small_Array($N, $T), index: int) -> T {
+	return a.data[index]
+}
+get_ptr :: proc(a: ^$A/Small_Array($N, $T), index: int) -> ^T {
+	return &a.data[index]
+}
+
+set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T) {
+	a.data[index] = item
+}
+
+resize :: proc(a: ^$A/Small_Array, length: int) {
+	a.len = min(length, builtin.len(a.data))
+}
+
+
+push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+	if a.len < cap(a^) {
+		a.data[a.len] = item
+		a.len += 1
+		return true
+	}
+	return false
+}
+
+push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+	if a.len < cap(a^) {
+		a.len += 1
+		data := slice(a)
+		copy(data[1:], data[:])
+		data[0] = item
+		return true
+	}
+	return false
+}
+
+pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+	assert(condition=(N > 0 && a.len > 0), loc=loc)
+	item := a.data[a.len-1]
+	a.len -= 1
+	return item
+}
+
+pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+	assert(condition=(N > 0 && a.len > 0), loc=loc)
+	item := a.data[0]
+	s := slice(a)
+	copy(s[:], s[1:])
+	a.len -= 1
+	return item
+}
+
+pop_back_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+	if N > 0 && a.len > 0 {
+		item = a.data[a.len-1]
+		a.len -= 1
+		ok = true
+	}
+	return
+}
+
+pop_front_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+	if N > 0 && a.len > 0 {
+		item = a.data[0]
+		s := slice(a)
+		copy(s[:], s[1:])
+		a.len -= 1
+		ok = true
+	} 
+	return
+}
+
+consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
+	assert(condition=a.len >= count, loc=loc)
+	a.len -= count
+}
+
+clear :: proc(a: ^$A/Small_Array($N, $T)) {
+	resize(a, 0)
+}
+
+push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
+	n := copy(a.data[a.len:], items[:])
+	a.len += n
+}
+
+append_elem  :: push_back
+append_elems :: push_back_elems
+push   :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
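
A minimal usage sketch of the new small_array API (illustrative only, not part of this commit; the import is aliased explicitly since the package name differs from the directory name):

	import small_array "core:container/small_array"

	example :: proc() {
		buf: small_array.Small_Array(8, int) // fixed backing storage, no heap allocation
		small_array.push_back(&buf, 2)
		small_array.push_front(&buf, 1)
		assert(small_array.len(buf) == 2)

		total := 0
		for v in small_array.slice(&buf) {
			total += v
		}
		assert(total == 3)
	}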

+ 98 - 0
core/container/topological_sort/topological_sort.odin

@@ -0,0 +1,98 @@
+// The following is a generic O(V+E) topological sorter implementation.
+// This is asymptotically optimal for topological sorting, and Odin's
+// map type is used to accelerate lookups.
+package container_topological_sort
+
+import "core:intrinsics"
+import "core:runtime"
+_ :: intrinsics
+_ :: runtime
+
+
+Relations :: struct($K: typeid) where intrinsics.type_is_valid_map_key(K) {
+	dependents:   map[K]bool,
+	dependencies: int,
+}
+
+Sorter :: struct($K: typeid) where intrinsics.type_is_valid_map_key(K) {
+	relations: map[K]Relations(K),
+	dependents_allocator: runtime.Allocator,
+}
+
+@(private="file")
+make_relations :: proc(sorter: ^$S/Sorter($K)) -> (r: Relations(K)) {
+	r.dependents.allocator = sorter.dependents_allocator
+	return
+}
+
+
+init :: proc(sorter: ^$S/Sorter($K)) {
+	sorter.relations = make(map[K]Relations(K))
+	sorter.dependents_allocator = context.allocator
+}
+
+destroy :: proc(sorter: ^$S/Sorter($K)) {
+	for _, v in &sorter.relations {
+		delete(v.dependents)
+	}
+	delete(sorter.relations)
+}
+
+add_key :: proc(sorter: ^$S/Sorter($K), key: K) -> bool {
+	if key in sorter.relations {
+		return false
+	}
+	sorter.relations[key] = make_relations(sorter)
+	return true
+}
+
+add_dependency :: proc(sorter: ^$S/Sorter($K), key, dependency: K) -> bool {
+	if key == dependency {
+		return false
+	}
+
+	find := &sorter.relations[dependency]
+	if find == nil {
+		find = map_insert(&sorter.relations, dependency, make_relations(sorter))
+	}
+
+	if find.dependents[key] {
+		return true
+	}
+	find.dependents[key] = true
+
+	find = &sorter.relations[key]
+	if find == nil {
+		find = map_insert(&sorter.relations, key, make_relations(sorter))
+	}
+
+	find.dependencies += 1
+
+	return true
+}
+
+sort :: proc(sorter: ^$S/Sorter($K)) -> (sorted, cycled: [dynamic]K) {
+	relations := &sorter.relations
+
+	for k, v in relations {
+		if v.dependencies == 0 {
+			append(&sorted, k)
+		}
+	}
+
+	for root in &sorted do for k, _ in relations[root].dependents {
+		relation := &relations[k]
+		relation.dependencies -= 1
+		if relation.dependencies == 0 {
+			append(&sorted, k)
+		}
+	}
+
+	for k, v in relations {
+		if v.dependencies != 0 {
+			append(&cycled, k)
+		}
+	}
+
+	return
+}
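
A minimal usage sketch of the sorter (illustrative only, not part of this commit; the `ts` import alias is an assumption for brevity):

	import ts "core:container/topological_sort"

	example :: proc() {
		sorter: ts.Sorter(string)
		ts.init(&sorter)
		defer ts.destroy(&sorter)

		ts.add_dependency(&sorter, "b", "a") // "b" depends on "a"
		ts.add_dependency(&sorter, "c", "b") // "c" depends on "b"

		sorted, cycled := ts.sort(&sorter)
		defer delete(sorted)
		defer delete(cycled)
		// sorted is ["a", "b", "c"]; cycled stays empty for an acyclic graph.
	}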

+ 7 - 1
core/crypto/README.md

@@ -32,9 +32,11 @@ Please see the chart below for the options.
 
 
 #### High level API
 Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_<size>`\*.  
-Included in these groups are four procedures.
+Included in these groups are six procedures.
 * `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
 * `hash_bytes` - Hash a given byte slice and return the computed hash
+* `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
+* `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
 * `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
 * `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)

@@ -59,6 +61,10 @@ main :: proc() {
     // Compute the hash, using the high level API
     computed_hash := md4.hash(input)

+    // Variant that takes a destination buffer, instead of returning the computed hash
+    hash := make([]byte, md4.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
+    md4.hash(input, hash[:])
+
     // Compute the hash, using the low level API
     ctx: md4.Md4_Context
     computed_hash_low: [16]byte

+ 35 - 0
core/crypto/_fiat/README.md

@@ -0,0 +1,35 @@
+# fiat
+
+This package contains low-level arithmetic required to implement certain
+cryptographic primitives, ported from the [fiat-crypto project][1]
+along with some higher-level helpers.
+
+## Notes
+
+fiat-crypto offers a choice of three licenses for derived works.  The 1-Clause
+BSD license is chosen as it is compatible with Odin's existing licensing.
+
+The routines are intended to be timing-safe, as long as the underlying
+integer arithmetic is constant time.  This is true on most systems commonly
+used today, with the notable exception of WASM.
+
+While fiat-crypto provides output targeting both 32-bit and 64-bit
+architectures, only the 64-bit versions were used, as 32-bit architectures
+are becoming increasingly uncommon and irrelevant.
+
+With the current Odin syntax, the Go output is trivially ported in most
+cases and was used as the basis of the port.
+
+In the future, it would be better to auto-generate Odin either directly
+by adding an appropriate code-gen backend written in Coq, or perhaps by
+parsing the JSON output.
+
+As this is a port rather than autogenerated output, none of fiat-crypto's
+formal verification guarantees apply, unless it is possible to prove binary
+equivalence.
+
+For the most part, alterations to the base fiat-crypto generated code were
+kept to a minimum, to aid auditability.  This results in a somewhat
+idiosyncratic style, and in some cases minor performance penalties.
+
+[1]: https://github.com/mit-plv/fiat-crypto

+ 24 - 0
core/crypto/_fiat/fiat.odin

@@ -0,0 +1,24 @@
+package fiat
+
+// This package provides various helpers and types common to all of the
+// fiat-crypto derived backends.
+
+// This code only works on a two's complement system.
+#assert((-1 & 3) == 3)
+
+u1 :: distinct u8
+i1 :: distinct i8
+
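+// cmovznz_u64 is a branchless conditional select: it returns arg2 when
+// arg1 == 0 and arg3 when arg1 == 1, so the choice cannot leak through
+// timing.  cmovznz_u32 is the 32-bit equivalent.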
+cmovznz_u64 :: #force_inline proc "contextless" (arg1: u1, arg2, arg3: u64) -> (out1: u64) {
+	x1 := (u64(arg1) * 0xffffffffffffffff)
+	x2 := ((x1 & arg3) | ((~x1) & arg2))
+	out1 = x2
+	return
+}
+
+cmovznz_u32 :: #force_inline proc "contextless" (arg1: u1, arg2, arg3: u32) -> (out1: u32) {
+	x1 := (u32(arg1) * 0xffffffff)
+	x2 := ((x1 & arg3) | ((~x1) & arg2))
+	out1 = x2
+	return
+}

+ 138 - 0
core/crypto/_fiat/field_curve25519/field.odin

@@ -0,0 +1,138 @@
+package field_curve25519
+
+import "core:crypto"
+import "core:mem"
+
+fe_relax_cast :: #force_inline proc "contextless" (arg1: ^Tight_Field_Element) -> ^Loose_Field_Element {
+	return transmute(^Loose_Field_Element)(arg1)
+}
+
+fe_tighten_cast :: #force_inline proc "contextless" (arg1: ^Loose_Field_Element) -> ^Tight_Field_Element {
+	return transmute(^Tight_Field_Element)(arg1)
+}
+
+fe_from_bytes :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^[32]byte) {
+	// Ignore the unused bit by copying the input and masking the bit off
+	// prior to deserialization.
+	tmp1: [32]byte = ---
+	copy_slice(tmp1[:], arg1[:])
+	tmp1[31] &= 127
+
+	_fe_from_bytes(out1, &tmp1)
+
+	mem.zero_explicit(&tmp1, size_of(tmp1))
+}
+
+fe_equal :: proc "contextless" (arg1, arg2: ^Tight_Field_Element) -> int {
+	tmp2: [32]byte = ---
+
+	fe_to_bytes(&tmp2, arg2)
+	ret := fe_equal_bytes(arg1, &tmp2)
+
+	mem.zero_explicit(&tmp2, size_of(tmp2))
+
+	return ret
+}
+
+fe_equal_bytes :: proc "contextless" (arg1: ^Tight_Field_Element, arg2: ^[32]byte) -> int {
+	tmp1: [32]byte = ---
+
+	fe_to_bytes(&tmp1, arg1)
+
+	ret := crypto.compare_constant_time(tmp1[:], arg2[:])
+
+	mem.zero_explicit(&tmp1, size_of(tmp1))
+
+	return ret
+}
+
+fe_carry_pow2k :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element, arg2: uint) {
+	// Special case: `arg1^(2 * 0) = 1`, though this should never happen.
+	if arg2 == 0 {
+		fe_one(out1)
+		return
+	}
+
+	fe_carry_square(out1, arg1)
+	for _ in 1..<arg2 {
+		fe_carry_square(out1, fe_relax_cast(out1))
+	}
+}
+
+fe_carry_opp :: #force_inline proc "contextless" (out1, arg1: ^Tight_Field_Element) {
+	fe_opp(fe_relax_cast(out1), arg1)
+	fe_carry(out1, fe_relax_cast(out1))
+}
+
+fe_carry_invsqrt :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) -> int {
+	// Inverse square root taken from Monocypher.
+
+	tmp1, tmp2, tmp3: Tight_Field_Element = ---, ---, ---
+
+	// t0 = x^((p-5)/8)
+	// Can be achieved with a simple double & add ladder,
+	// but it would be slower.
+	fe_carry_pow2k(&tmp1, arg1, 1)
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp1), 2)
+	fe_carry_mul(&tmp2, arg1, fe_relax_cast(&tmp2))
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp1), fe_relax_cast(&tmp2))
+	fe_carry_pow2k(&tmp1, fe_relax_cast(&tmp1), 1)
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp1), 5)
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp1), 10)
+	fe_carry_mul(&tmp2, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp3, fe_relax_cast(&tmp2), 20)
+	fe_carry_mul(&tmp2, fe_relax_cast(&tmp3), fe_relax_cast(&tmp2))
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp2), 10)
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp1), 50)
+	fe_carry_mul(&tmp2, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp3, fe_relax_cast(&tmp2), 100)
+	fe_carry_mul(&tmp2, fe_relax_cast(&tmp3), fe_relax_cast(&tmp2))
+	fe_carry_pow2k(&tmp2, fe_relax_cast(&tmp2), 50)
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp2), fe_relax_cast(&tmp1))
+	fe_carry_pow2k(&tmp1, fe_relax_cast(&tmp1), 2)
+	fe_carry_mul(&tmp1, fe_relax_cast(&tmp1), arg1)
+
+	// quartic = x^((p-1)/4)
+	quartic := &tmp2
+	fe_carry_square(quartic, fe_relax_cast(&tmp1))
+	fe_carry_mul(quartic, fe_relax_cast(quartic), arg1)
+
+	// Serialize quartic once to save on repeated serialization/sanitization.
+	quartic_buf: [32]byte = ---
+	fe_to_bytes(&quartic_buf, quartic)
+	check := &tmp3
+
+	fe_one(check)
+	p1 := fe_equal_bytes(check, &quartic_buf)
+	fe_carry_opp(check, check)
+	m1 := fe_equal_bytes(check, &quartic_buf)
+	fe_carry_opp(check, &SQRT_M1)
+	ms := fe_equal_bytes(check, &quartic_buf)
+
+	// if quartic == -1 or sqrt(-1)
+	// then  isr = x^((p-1)/4) * sqrt(-1)
+	// else  isr = x^((p-1)/4)
+	fe_carry_mul(out1, fe_relax_cast(&tmp1), fe_relax_cast(&SQRT_M1))
+	fe_cond_assign(out1, &tmp1, (m1|ms) ~ 1)
+
+	mem.zero_explicit(&tmp1, size_of(tmp1))
+	mem.zero_explicit(&tmp2, size_of(tmp2))
+	mem.zero_explicit(&tmp3, size_of(tmp3))
+	mem.zero_explicit(&quartic_buf, size_of(quartic_buf))
+
+	return p1 | m1
+}
+
+fe_carry_inv :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	tmp1: Tight_Field_Element
+
+	fe_carry_square(&tmp1, arg1)
+	_ = fe_carry_invsqrt(&tmp1, fe_relax_cast(&tmp1))
+	fe_carry_square(&tmp1, fe_relax_cast(&tmp1))
+	fe_carry_mul(out1, fe_relax_cast(&tmp1), arg1)
+
+	mem.zero_explicit(&tmp1, size_of(tmp1))
+}

+ 616 - 0
core/crypto/_fiat/field_curve25519/field51.odin

@@ -0,0 +1,616 @@
+// The BSD 1-Clause License (BSD-1-Clause)
+//
+// Copyright (c) 2015-2020 the fiat-crypto authors (see the AUTHORS file)
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     1. Redistributions of source code must retain the above copyright
+//        notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package field_curve25519
+
+// This file provides arithmetic on the field Z/(2^255-19) using
+// unsaturated 64-bit integer arithmetic.  It is derived primarily
+// from the machine-generated Golang output from the fiat-crypto project.
+//
+// While the base implementation is provably correct, this implementation
+// makes no such claims as the port and optimizations were done by hand.
+// At some point, it may be worth adding support to fiat-crypto for
+// generating Odin output.
+//
+// TODO:
+//  * When fiat-crypto supports it, using saturated 64-bit limbs
+//    instead of 51-bit limbs will be faster, though the gains are
+//    minimal unless adcx/adox/mulx are used.
+
+import fiat "core:crypto/_fiat"
+import "core:math/bits"
+
+Loose_Field_Element :: distinct [5]u64
+Tight_Field_Element :: distinct [5]u64
+
+SQRT_M1 := Tight_Field_Element{
+	1718705420411056,
+	234908883556509,
+	2233514472574048,
+	2117202627021982,
+	765476049583133,
+}
+
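+// _addcarryx_u51 and _subborrowx_u51 are add-with-carry and
+// subtract-with-borrow on 51-bit limbs: each returns the low 51 bits of
+// the result together with the carry (or borrow) out of bit 51.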
+_addcarryx_u51 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((u64(arg1) + arg2) + arg3)
+	x2 := (x1 & 0x7ffffffffffff)
+	x3 := fiat.u1((x1 >> 51))
+	out1 = x2
+	out2 = x3
+	return
+}
+
+_subborrowx_u51 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((i64(arg2) - i64(arg1)) - i64(arg3))
+	x2 := fiat.i1((x1 >> 51))
+	x3 := (u64(x1) & 0x7ffffffffffff)
+	out1 = x3
+	out2 = (0x0 - fiat.u1(x2))
+	return
+}
+
+fe_carry_mul :: proc (out1: ^Tight_Field_Element, arg1, arg2: ^Loose_Field_Element) {
+	x2, x1 := bits.mul_u64(arg1[4], (arg2[4] * 0x13))
+	x4, x3 := bits.mul_u64(arg1[4], (arg2[3] * 0x13))
+	x6, x5 := bits.mul_u64(arg1[4], (arg2[2] * 0x13))
+	x8, x7 := bits.mul_u64(arg1[4], (arg2[1] * 0x13))
+	x10, x9 := bits.mul_u64(arg1[3], (arg2[4] * 0x13))
+	x12, x11 := bits.mul_u64(arg1[3], (arg2[3] * 0x13))
+	x14, x13 := bits.mul_u64(arg1[3], (arg2[2] * 0x13))
+	x16, x15 := bits.mul_u64(arg1[2], (arg2[4] * 0x13))
+	x18, x17 := bits.mul_u64(arg1[2], (arg2[3] * 0x13))
+	x20, x19 := bits.mul_u64(arg1[1], (arg2[4] * 0x13))
+	x22, x21 := bits.mul_u64(arg1[4], arg2[0])
+	x24, x23 := bits.mul_u64(arg1[3], arg2[1])
+	x26, x25 := bits.mul_u64(arg1[3], arg2[0])
+	x28, x27 := bits.mul_u64(arg1[2], arg2[2])
+	x30, x29 := bits.mul_u64(arg1[2], arg2[1])
+	x32, x31 := bits.mul_u64(arg1[2], arg2[0])
+	x34, x33 := bits.mul_u64(arg1[1], arg2[3])
+	x36, x35 := bits.mul_u64(arg1[1], arg2[2])
+	x38, x37 := bits.mul_u64(arg1[1], arg2[1])
+	x40, x39 := bits.mul_u64(arg1[1], arg2[0])
+	x42, x41 := bits.mul_u64(arg1[0], arg2[4])
+	x44, x43 := bits.mul_u64(arg1[0], arg2[3])
+	x46, x45 := bits.mul_u64(arg1[0], arg2[2])
+	x48, x47 := bits.mul_u64(arg1[0], arg2[1])
+	x50, x49 := bits.mul_u64(arg1[0], arg2[0])
+	x51, x52 := bits.add_u64(x13, x7, u64(0x0))
+	x53, _ := bits.add_u64(x14, x8, u64(fiat.u1(x52)))
+	x55, x56 := bits.add_u64(x17, x51, u64(0x0))
+	x57, _ := bits.add_u64(x18, x53, u64(fiat.u1(x56)))
+	x59, x60 := bits.add_u64(x19, x55, u64(0x0))
+	x61, _ := bits.add_u64(x20, x57, u64(fiat.u1(x60)))
+	x63, x64 := bits.add_u64(x49, x59, u64(0x0))
+	x65, _ := bits.add_u64(x50, x61, u64(fiat.u1(x64)))
+	x67 := ((x63 >> 51) | ((x65 << 13) & 0xffffffffffffffff))
+	x68 := (x63 & 0x7ffffffffffff)
+	x69, x70 := bits.add_u64(x23, x21, u64(0x0))
+	x71, _ := bits.add_u64(x24, x22, u64(fiat.u1(x70)))
+	x73, x74 := bits.add_u64(x27, x69, u64(0x0))
+	x75, _ := bits.add_u64(x28, x71, u64(fiat.u1(x74)))
+	x77, x78 := bits.add_u64(x33, x73, u64(0x0))
+	x79, _ := bits.add_u64(x34, x75, u64(fiat.u1(x78)))
+	x81, x82 := bits.add_u64(x41, x77, u64(0x0))
+	x83, _ := bits.add_u64(x42, x79, u64(fiat.u1(x82)))
+	x85, x86 := bits.add_u64(x25, x1, u64(0x0))
+	x87, _ := bits.add_u64(x26, x2, u64(fiat.u1(x86)))
+	x89, x90 := bits.add_u64(x29, x85, u64(0x0))
+	x91, _ := bits.add_u64(x30, x87, u64(fiat.u1(x90)))
+	x93, x94 := bits.add_u64(x35, x89, u64(0x0))
+	x95, _ := bits.add_u64(x36, x91, u64(fiat.u1(x94)))
+	x97, x98 := bits.add_u64(x43, x93, u64(0x0))
+	x99, _ := bits.add_u64(x44, x95, u64(fiat.u1(x98)))
+	x101, x102 := bits.add_u64(x9, x3, u64(0x0))
+	x103, _ := bits.add_u64(x10, x4, u64(fiat.u1(x102)))
+	x105, x106 := bits.add_u64(x31, x101, u64(0x0))
+	x107, _ := bits.add_u64(x32, x103, u64(fiat.u1(x106)))
+	x109, x110 := bits.add_u64(x37, x105, u64(0x0))
+	x111, _ := bits.add_u64(x38, x107, u64(fiat.u1(x110)))
+	x113, x114 := bits.add_u64(x45, x109, u64(0x0))
+	x115, _ := bits.add_u64(x46, x111, u64(fiat.u1(x114)))
+	x117, x118 := bits.add_u64(x11, x5, u64(0x0))
+	x119, _ := bits.add_u64(x12, x6, u64(fiat.u1(x118)))
+	x121, x122 := bits.add_u64(x15, x117, u64(0x0))
+	x123, _ := bits.add_u64(x16, x119, u64(fiat.u1(x122)))
+	x125, x126 := bits.add_u64(x39, x121, u64(0x0))
+	x127, _ := bits.add_u64(x40, x123, u64(fiat.u1(x126)))
+	x129, x130 := bits.add_u64(x47, x125, u64(0x0))
+	x131, _ := bits.add_u64(x48, x127, u64(fiat.u1(x130)))
+	x133, x134 := bits.add_u64(x67, x129, u64(0x0))
+	x135 := (u64(fiat.u1(x134)) + x131)
+	x136 := ((x133 >> 51) | ((x135 << 13) & 0xffffffffffffffff))
+	x137 := (x133 & 0x7ffffffffffff)
+	x138, x139 := bits.add_u64(x136, x113, u64(0x0))
+	x140 := (u64(fiat.u1(x139)) + x115)
+	x141 := ((x138 >> 51) | ((x140 << 13) & 0xffffffffffffffff))
+	x142 := (x138 & 0x7ffffffffffff)
+	x143, x144 := bits.add_u64(x141, x97, u64(0x0))
+	x145 := (u64(fiat.u1(x144)) + x99)
+	x146 := ((x143 >> 51) | ((x145 << 13) & 0xffffffffffffffff))
+	x147 := (x143 & 0x7ffffffffffff)
+	x148, x149 := bits.add_u64(x146, x81, u64(0x0))
+	x150 := (u64(fiat.u1(x149)) + x83)
+	x151 := ((x148 >> 51) | ((x150 << 13) & 0xffffffffffffffff))
+	x152 := (x148 & 0x7ffffffffffff)
+	x153 := (x151 * 0x13)
+	x154 := (x68 + x153)
+	x155 := (x154 >> 51)
+	x156 := (x154 & 0x7ffffffffffff)
+	x157 := (x155 + x137)
+	x158 := fiat.u1((x157 >> 51))
+	x159 := (x157 & 0x7ffffffffffff)
+	x160 := (u64(x158) + x142)
+	out1[0] = x156
+	out1[1] = x159
+	out1[2] = x160
+	out1[3] = x147
+	out1[4] = x152
+}
+
+fe_carry_square :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	x1 := (arg1[4] * 0x13)
+	x2 := (x1 * 0x2)
+	x3 := (arg1[4] * 0x2)
+	x4 := (arg1[3] * 0x13)
+	x5 := (x4 * 0x2)
+	x6 := (arg1[3] * 0x2)
+	x7 := (arg1[2] * 0x2)
+	x8 := (arg1[1] * 0x2)
+	x10, x9 := bits.mul_u64(arg1[4], x1)
+	x12, x11 := bits.mul_u64(arg1[3], x2)
+	x14, x13 := bits.mul_u64(arg1[3], x4)
+	x16, x15 := bits.mul_u64(arg1[2], x2)
+	x18, x17 := bits.mul_u64(arg1[2], x5)
+	x20, x19 := bits.mul_u64(arg1[2], arg1[2])
+	x22, x21 := bits.mul_u64(arg1[1], x2)
+	x24, x23 := bits.mul_u64(arg1[1], x6)
+	x26, x25 := bits.mul_u64(arg1[1], x7)
+	x28, x27 := bits.mul_u64(arg1[1], arg1[1])
+	x30, x29 := bits.mul_u64(arg1[0], x3)
+	x32, x31 := bits.mul_u64(arg1[0], x6)
+	x34, x33 := bits.mul_u64(arg1[0], x7)
+	x36, x35 := bits.mul_u64(arg1[0], x8)
+	x38, x37 := bits.mul_u64(arg1[0], arg1[0])
+	x39, x40 := bits.add_u64(x21, x17, u64(0x0))
+	x41, _ := bits.add_u64(x22, x18, u64(fiat.u1(x40)))
+	x43, x44 := bits.add_u64(x37, x39, u64(0x0))
+	x45, _ := bits.add_u64(x38, x41, u64(fiat.u1(x44)))
+	x47 := ((x43 >> 51) | ((x45 << 13) & 0xffffffffffffffff))
+	x48 := (x43 & 0x7ffffffffffff)
+	x49, x50 := bits.add_u64(x23, x19, u64(0x0))
+	x51, _ := bits.add_u64(x24, x20, u64(fiat.u1(x50)))
+	x53, x54 := bits.add_u64(x29, x49, u64(0x0))
+	x55, _ := bits.add_u64(x30, x51, u64(fiat.u1(x54)))
+	x57, x58 := bits.add_u64(x25, x9, u64(0x0))
+	x59, _ := bits.add_u64(x26, x10, u64(fiat.u1(x58)))
+	x61, x62 := bits.add_u64(x31, x57, u64(0x0))
+	x63, _ := bits.add_u64(x32, x59, u64(fiat.u1(x62)))
+	x65, x66 := bits.add_u64(x27, x11, u64(0x0))
+	x67, _ := bits.add_u64(x28, x12, u64(fiat.u1(x66)))
+	x69, x70 := bits.add_u64(x33, x65, u64(0x0))
+	x71, _ := bits.add_u64(x34, x67, u64(fiat.u1(x70)))
+	x73, x74 := bits.add_u64(x15, x13, u64(0x0))
+	x75, _ := bits.add_u64(x16, x14, u64(fiat.u1(x74)))
+	x77, x78 := bits.add_u64(x35, x73, u64(0x0))
+	x79, _ := bits.add_u64(x36, x75, u64(fiat.u1(x78)))
+	x81, x82 := bits.add_u64(x47, x77, u64(0x0))
+	x83 := (u64(fiat.u1(x82)) + x79)
+	x84 := ((x81 >> 51) | ((x83 << 13) & 0xffffffffffffffff))
+	x85 := (x81 & 0x7ffffffffffff)
+	x86, x87 := bits.add_u64(x84, x69, u64(0x0))
+	x88 := (u64(fiat.u1(x87)) + x71)
+	x89 := ((x86 >> 51) | ((x88 << 13) & 0xffffffffffffffff))
+	x90 := (x86 & 0x7ffffffffffff)
+	x91, x92 := bits.add_u64(x89, x61, u64(0x0))
+	x93 := (u64(fiat.u1(x92)) + x63)
+	x94 := ((x91 >> 51) | ((x93 << 13) & 0xffffffffffffffff))
+	x95 := (x91 & 0x7ffffffffffff)
+	x96, x97 := bits.add_u64(x94, x53, u64(0x0))
+	x98 := (u64(fiat.u1(x97)) + x55)
+	x99 := ((x96 >> 51) | ((x98 << 13) & 0xffffffffffffffff))
+	x100 := (x96 & 0x7ffffffffffff)
+	x101 := (x99 * 0x13)
+	x102 := (x48 + x101)
+	x103 := (x102 >> 51)
+	x104 := (x102 & 0x7ffffffffffff)
+	x105 := (x103 + x85)
+	x106 := fiat.u1((x105 >> 51))
+	x107 := (x105 & 0x7ffffffffffff)
+	x108 := (u64(x106) + x90)
+	out1[0] = x104
+	out1[1] = x107
+	out1[2] = x108
+	out1[3] = x95
+	out1[4] = x100
+}
+
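+// fe_carry reduces a Loose_Field_Element to a Tight_Field_Element by
+// propagating carries across the 51-bit limbs, folding the carry out of
+// the top limb back into the bottom limb multiplied by 19, since
+// 2^255 = 19 (mod 2^255 - 19).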
+fe_carry :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	x1 := arg1[0]
+	x2 := ((x1 >> 51) + arg1[1])
+	x3 := ((x2 >> 51) + arg1[2])
+	x4 := ((x3 >> 51) + arg1[3])
+	x5 := ((x4 >> 51) + arg1[4])
+	x6 := ((x1 & 0x7ffffffffffff) + ((x5 >> 51) * 0x13))
+	x7 := (u64(fiat.u1((x6 >> 51))) + (x2 & 0x7ffffffffffff))
+	x8 := (x6 & 0x7ffffffffffff)
+	x9 := (x7 & 0x7ffffffffffff)
+	x10 := (u64(fiat.u1((x7 >> 51))) + (x3 & 0x7ffffffffffff))
+	x11 := (x4 & 0x7ffffffffffff)
+	x12 := (x5 & 0x7ffffffffffff)
+	out1[0] = x8
+	out1[1] = x9
+	out1[2] = x10
+	out1[3] = x11
+	out1[4] = x12
+}
+
+fe_add :: proc "contextless" (out1: ^Loose_Field_Element, arg1, arg2: ^Tight_Field_Element) {
+	x1 := (arg1[0] + arg2[0])
+	x2 := (arg1[1] + arg2[1])
+	x3 := (arg1[2] + arg2[2])
+	x4 := (arg1[3] + arg2[3])
+	x5 := (arg1[4] + arg2[4])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
+fe_sub :: proc "contextless" (out1: ^Loose_Field_Element, arg1, arg2: ^Tight_Field_Element) {
+	x1 := ((0xfffffffffffda + arg1[0]) - arg2[0])
+	x2 := ((0xffffffffffffe + arg1[1]) - arg2[1])
+	x3 := ((0xffffffffffffe + arg1[2]) - arg2[2])
+	x4 := ((0xffffffffffffe + arg1[3]) - arg2[3])
+	x5 := ((0xffffffffffffe + arg1[4]) - arg2[4])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
+fe_opp :: proc "contextless" (out1: ^Loose_Field_Element, arg1: ^Tight_Field_Element) {
+	x1 := (0xfffffffffffda - arg1[0])
+	x2 := (0xffffffffffffe - arg1[1])
+	x3 := (0xffffffffffffe - arg1[2])
+	x4 := (0xffffffffffffe - arg1[3])
+	x5 := (0xffffffffffffe - arg1[4])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
+fe_cond_assign :: proc "contextless" (out1, arg1: ^Tight_Field_Element, arg2: int) {
+	x1 := fiat.cmovznz_u64(fiat.u1(arg2), out1[0], arg1[0])
+	x2 := fiat.cmovznz_u64(fiat.u1(arg2), out1[1], arg1[1])
+	x3 := fiat.cmovznz_u64(fiat.u1(arg2), out1[2], arg1[2])
+	x4 := fiat.cmovznz_u64(fiat.u1(arg2), out1[3], arg1[3])
+	x5 := fiat.cmovznz_u64(fiat.u1(arg2), out1[4], arg1[4])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
+fe_to_bytes :: proc "contextless" (out1: ^[32]byte, arg1: ^Tight_Field_Element) {
+	x1, x2 := _subborrowx_u51(0x0, arg1[0], 0x7ffffffffffed)
+	x3, x4 := _subborrowx_u51(x2, arg1[1], 0x7ffffffffffff)
+	x5, x6 := _subborrowx_u51(x4, arg1[2], 0x7ffffffffffff)
+	x7, x8 := _subborrowx_u51(x6, arg1[3], 0x7ffffffffffff)
+	x9, x10 := _subborrowx_u51(x8, arg1[4], 0x7ffffffffffff)
+	x11 := fiat.cmovznz_u64(x10, u64(0x0), 0xffffffffffffffff)
+	x12, x13 := _addcarryx_u51(0x0, x1, (x11 & 0x7ffffffffffed))
+	x14, x15 := _addcarryx_u51(x13, x3, (x11 & 0x7ffffffffffff))
+	x16, x17 := _addcarryx_u51(x15, x5, (x11 & 0x7ffffffffffff))
+	x18, x19 := _addcarryx_u51(x17, x7, (x11 & 0x7ffffffffffff))
+	x20, _ := _addcarryx_u51(x19, x9, (x11 & 0x7ffffffffffff))
+	x22 := (x20 << 4)
+	x23 := (x18 * u64(0x2))
+	x24 := (x16 << 6)
+	x25 := (x14 << 3)
+	x26 := (u8(x12) & 0xff)
+	x27 := (x12 >> 8)
+	x28 := (u8(x27) & 0xff)
+	x29 := (x27 >> 8)
+	x30 := (u8(x29) & 0xff)
+	x31 := (x29 >> 8)
+	x32 := (u8(x31) & 0xff)
+	x33 := (x31 >> 8)
+	x34 := (u8(x33) & 0xff)
+	x35 := (x33 >> 8)
+	x36 := (u8(x35) & 0xff)
+	x37 := u8((x35 >> 8))
+	x38 := (x25 + u64(x37))
+	x39 := (u8(x38) & 0xff)
+	x40 := (x38 >> 8)
+	x41 := (u8(x40) & 0xff)
+	x42 := (x40 >> 8)
+	x43 := (u8(x42) & 0xff)
+	x44 := (x42 >> 8)
+	x45 := (u8(x44) & 0xff)
+	x46 := (x44 >> 8)
+	x47 := (u8(x46) & 0xff)
+	x48 := (x46 >> 8)
+	x49 := (u8(x48) & 0xff)
+	x50 := u8((x48 >> 8))
+	x51 := (x24 + u64(x50))
+	x52 := (u8(x51) & 0xff)
+	x53 := (x51 >> 8)
+	x54 := (u8(x53) & 0xff)
+	x55 := (x53 >> 8)
+	x56 := (u8(x55) & 0xff)
+	x57 := (x55 >> 8)
+	x58 := (u8(x57) & 0xff)
+	x59 := (x57 >> 8)
+	x60 := (u8(x59) & 0xff)
+	x61 := (x59 >> 8)
+	x62 := (u8(x61) & 0xff)
+	x63 := (x61 >> 8)
+	x64 := (u8(x63) & 0xff)
+	x65 := fiat.u1((x63 >> 8))
+	x66 := (x23 + u64(x65))
+	x67 := (u8(x66) & 0xff)
+	x68 := (x66 >> 8)
+	x69 := (u8(x68) & 0xff)
+	x70 := (x68 >> 8)
+	x71 := (u8(x70) & 0xff)
+	x72 := (x70 >> 8)
+	x73 := (u8(x72) & 0xff)
+	x74 := (x72 >> 8)
+	x75 := (u8(x74) & 0xff)
+	x76 := (x74 >> 8)
+	x77 := (u8(x76) & 0xff)
+	x78 := u8((x76 >> 8))
+	x79 := (x22 + u64(x78))
+	x80 := (u8(x79) & 0xff)
+	x81 := (x79 >> 8)
+	x82 := (u8(x81) & 0xff)
+	x83 := (x81 >> 8)
+	x84 := (u8(x83) & 0xff)
+	x85 := (x83 >> 8)
+	x86 := (u8(x85) & 0xff)
+	x87 := (x85 >> 8)
+	x88 := (u8(x87) & 0xff)
+	x89 := (x87 >> 8)
+	x90 := (u8(x89) & 0xff)
+	x91 := u8((x89 >> 8))
+	out1[0] = x26
+	out1[1] = x28
+	out1[2] = x30
+	out1[3] = x32
+	out1[4] = x34
+	out1[5] = x36
+	out1[6] = x39
+	out1[7] = x41
+	out1[8] = x43
+	out1[9] = x45
+	out1[10] = x47
+	out1[11] = x49
+	out1[12] = x52
+	out1[13] = x54
+	out1[14] = x56
+	out1[15] = x58
+	out1[16] = x60
+	out1[17] = x62
+	out1[18] = x64
+	out1[19] = x67
+	out1[20] = x69
+	out1[21] = x71
+	out1[22] = x73
+	out1[23] = x75
+	out1[24] = x77
+	out1[25] = x80
+	out1[26] = x82
+	out1[27] = x84
+	out1[28] = x86
+	out1[29] = x88
+	out1[30] = x90
+	out1[31] = x91
+}
+
+_fe_from_bytes :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^[32]byte) {
+	x1 := (u64(arg1[31]) << 44)
+	x2 := (u64(arg1[30]) << 36)
+	x3 := (u64(arg1[29]) << 28)
+	x4 := (u64(arg1[28]) << 20)
+	x5 := (u64(arg1[27]) << 12)
+	x6 := (u64(arg1[26]) << 4)
+	x7 := (u64(arg1[25]) << 47)
+	x8 := (u64(arg1[24]) << 39)
+	x9 := (u64(arg1[23]) << 31)
+	x10 := (u64(arg1[22]) << 23)
+	x11 := (u64(arg1[21]) << 15)
+	x12 := (u64(arg1[20]) << 7)
+	x13 := (u64(arg1[19]) << 50)
+	x14 := (u64(arg1[18]) << 42)
+	x15 := (u64(arg1[17]) << 34)
+	x16 := (u64(arg1[16]) << 26)
+	x17 := (u64(arg1[15]) << 18)
+	x18 := (u64(arg1[14]) << 10)
+	x19 := (u64(arg1[13]) << 2)
+	x20 := (u64(arg1[12]) << 45)
+	x21 := (u64(arg1[11]) << 37)
+	x22 := (u64(arg1[10]) << 29)
+	x23 := (u64(arg1[9]) << 21)
+	x24 := (u64(arg1[8]) << 13)
+	x25 := (u64(arg1[7]) << 5)
+	x26 := (u64(arg1[6]) << 48)
+	x27 := (u64(arg1[5]) << 40)
+	x28 := (u64(arg1[4]) << 32)
+	x29 := (u64(arg1[3]) << 24)
+	x30 := (u64(arg1[2]) << 16)
+	x31 := (u64(arg1[1]) << 8)
+	x32 := arg1[0]
+	x33 := (x31 + u64(x32))
+	x34 := (x30 + x33)
+	x35 := (x29 + x34)
+	x36 := (x28 + x35)
+	x37 := (x27 + x36)
+	x38 := (x26 + x37)
+	x39 := (x38 & 0x7ffffffffffff)
+	x40 := u8((x38 >> 51))
+	x41 := (x25 + u64(x40))
+	x42 := (x24 + x41)
+	x43 := (x23 + x42)
+	x44 := (x22 + x43)
+	x45 := (x21 + x44)
+	x46 := (x20 + x45)
+	x47 := (x46 & 0x7ffffffffffff)
+	x48 := u8((x46 >> 51))
+	x49 := (x19 + u64(x48))
+	x50 := (x18 + x49)
+	x51 := (x17 + x50)
+	x52 := (x16 + x51)
+	x53 := (x15 + x52)
+	x54 := (x14 + x53)
+	x55 := (x13 + x54)
+	x56 := (x55 & 0x7ffffffffffff)
+	x57 := u8((x55 >> 51))
+	x58 := (x12 + u64(x57))
+	x59 := (x11 + x58)
+	x60 := (x10 + x59)
+	x61 := (x9 + x60)
+	x62 := (x8 + x61)
+	x63 := (x7 + x62)
+	x64 := (x63 & 0x7ffffffffffff)
+	x65 := u8((x63 >> 51))
+	x66 := (x6 + u64(x65))
+	x67 := (x5 + x66)
+	x68 := (x4 + x67)
+	x69 := (x3 + x68)
+	x70 := (x2 + x69)
+	x71 := (x1 + x70)
+	out1[0] = x39
+	out1[1] = x47
+	out1[2] = x56
+	out1[3] = x64
+	out1[4] = x71
+}
+
+fe_relax :: proc "contextless" (out1: ^Loose_Field_Element, arg1: ^Tight_Field_Element) {
+	x1 := arg1[0]
+	x2 := arg1[1]
+	x3 := arg1[2]
+	x4 := arg1[3]
+	x5 := arg1[4]
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
+fe_carry_scmul_121666 :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	x2, x1 := bits.mul_u64(0x1db42, arg1[4])
+	x4, x3 := bits.mul_u64(0x1db42, arg1[3])
+	x6, x5 := bits.mul_u64(0x1db42, arg1[2])
+	x8, x7 := bits.mul_u64(0x1db42, arg1[1])
+	x10, x9 := bits.mul_u64(0x1db42, arg1[0])
+	x11 := ((x9 >> 51) | ((x10 << 13) & 0xffffffffffffffff))
+	x12 := (x9 & 0x7ffffffffffff)
+	x13, x14 := bits.add_u64(x11, x7, u64(0x0))
+	x15 := (u64(fiat.u1(x14)) + x8)
+	x16 := ((x13 >> 51) | ((x15 << 13) & 0xffffffffffffffff))
+	x17 := (x13 & 0x7ffffffffffff)
+	x18, x19 := bits.add_u64(x16, x5, u64(0x0))
+	x20 := (u64(fiat.u1(x19)) + x6)
+	x21 := ((x18 >> 51) | ((x20 << 13) & 0xffffffffffffffff))
+	x22 := (x18 & 0x7ffffffffffff)
+	x23, x24 := bits.add_u64(x21, x3, u64(0x0))
+	x25 := (u64(fiat.u1(x24)) + x4)
+	x26 := ((x23 >> 51) | ((x25 << 13) & 0xffffffffffffffff))
+	x27 := (x23 & 0x7ffffffffffff)
+	x28, x29 := bits.add_u64(x26, x1, u64(0x0))
+	x30 := (u64(fiat.u1(x29)) + x2)
+	x31 := ((x28 >> 51) | ((x30 << 13) & 0xffffffffffffffff))
+	x32 := (x28 & 0x7ffffffffffff)
+	x33 := (x31 * 0x13)
+	x34 := (x12 + x33)
+	x35 := fiat.u1((x34 >> 51))
+	x36 := (x34 & 0x7ffffffffffff)
+	x37 := (u64(x35) + x17)
+	x38 := fiat.u1((x37 >> 51))
+	x39 := (x37 & 0x7ffffffffffff)
+	x40 := (u64(x38) + x22)
+	out1[0] = x36
+	out1[1] = x39
+	out1[2] = x40
+	out1[3] = x27
+	out1[4] = x32
+}
+
+// The following routines were added by hand, and do not come from fiat-crypto.
+
+fe_zero :: proc "contextless" (out1: ^Tight_Field_Element) {
+	out1[0] = 0
+	out1[1] = 0
+	out1[2] = 0
+	out1[3] = 0
+	out1[4] = 0
+}
+
+fe_one :: proc "contextless" (out1: ^Tight_Field_Element) {
+	out1[0] = 1
+	out1[1] = 0
+	out1[2] = 0
+	out1[3] = 0
+	out1[4] = 0
+}
+
+fe_set :: proc "contextless" (out1, arg1: ^Tight_Field_Element) {
+	x1 := arg1[0]
+	x2 := arg1[1]
+	x3 := arg1[2]
+	x4 := arg1[3]
+	x5 := arg1[4]
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+	out1[3] = x4
+	out1[4] = x5
+}
+
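+// fe_cond_swap exchanges the two field elements in constant time: when
+// arg1 == 1 the mask is all-ones and the limbs are swapped, and when
+// arg1 == 0 they are left unchanged.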
+fe_cond_swap :: proc "contextless" (out1, out2: ^Tight_Field_Element, arg1: int) {
+	mask := -u64(arg1)
+	x := (out1[0] ~ out2[0]) & mask
+	x1, y1 := out1[0] ~ x, out2[0] ~ x
+	x = (out1[1] ~ out2[1]) & mask
+	x2, y2 := out1[1] ~ x, out2[1] ~ x
+	x = (out1[2] ~ out2[2]) & mask
+	x3, y3 := out1[2] ~ x, out2[2] ~ x
+	x = (out1[3] ~ out2[3]) & mask
+	x4, y4 := out1[3] ~ x, out2[3] ~ x
+	x = (out1[4] ~ out2[4]) & mask
+	x5, y5 := out1[4] ~ x, out2[4] ~ x
+	out1[0], out2[0] = x1, y1
+	out1[1], out2[1] = x2, y2
+	out1[2], out2[2] = x3, y3
+	out1[3], out2[3] = x4, y4
+	out1[4], out2[4] = x5, y5
+}

+ 66 - 0
core/crypto/_fiat/field_poly1305/field.odin

@@ -0,0 +1,66 @@
+package field_poly1305
+
+import "core:crypto/util"
+import "core:mem"
+
+fe_relax_cast :: #force_inline proc "contextless" (arg1: ^Tight_Field_Element) -> ^Loose_Field_Element {
+	return transmute(^Loose_Field_Element)(arg1)
+}
+
+fe_tighten_cast :: #force_inline proc "contextless" (arg1: ^Loose_Field_Element) -> ^Tight_Field_Element {
+	return transmute(^Tight_Field_Element)(arg1)
+}
+
+fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, arg2: byte, sanitize: bool = true) {
+	// fiat-crypto's deserialization routine effectively processes a
+	// single byte at a time, and wants 256 bits of input for a value
+	// that will be 128 or 129 bits.
+	//
+	// This is somewhat cumbersome to use, so at a minimum a wrapper
+	// makes implementing the actual MAC block processing considerably
+	// neater.
+
+	assert(len(arg1) == 16)
+
+	when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
+		// While it may be unwise to do deserialization here on our
+		// own when fiat-crypto provides equivalent functionality,
+		// doing it this way provides a little under 3x performance
+		// improvement when optimization is enabled.
+		src_p := transmute(^[2]u64)(&arg1[0])
+		lo := src_p[0]
+		hi := src_p[1]
+
+		// This is inspired by poly1305-donna, though adjustments were
+		// made since a Tight_Field_Element's limbs are 44-bits, 43-bits,
+		// and 43-bits wide.
+		//
+		// Note: This could be transplanted into fe_from_u64s, but that
+		// code is called once per MAC, and is not on the critical path.
+		hibit := u64(arg2) << 41 // arg2 << 128
+		out1[0] = lo & 0xfffffffffff
+		out1[1] = ((lo >> 44) | (hi << 20)) & 0x7ffffffffff
+		out1[2] = ((hi >> 23) & 0x7ffffffffff) | hibit
+	} else {
+		tmp: [32]byte
+		copy_slice(tmp[0:16], arg1[:])
+		tmp[16] = arg2
+
+		_fe_from_bytes(out1, &tmp)
+		if sanitize {
+			// This is used to deserialize `s` which is confidential.
+			mem.zero_explicit(&tmp, size_of(tmp))
+		}
+	}
+}
+
+fe_from_u64s :: proc "contextless" (out1: ^Tight_Field_Element, lo, hi: u64) {
+	tmp: [32]byte
+	util.PUT_U64_LE(tmp[0:8], lo)
+	util.PUT_U64_LE(tmp[8:16], hi)
+
+	_fe_from_bytes(out1, &tmp)
+
+	// This routine is only used to deserialize `r` which is confidential.
+	mem.zero_explicit(&tmp, size_of(tmp))
+}

+ 356 - 0
core/crypto/_fiat/field_poly1305/field4344.odin

@@ -0,0 +1,356 @@
+// The BSD 1-Clause License (BSD-1-Clause)
+//
+// Copyright (c) 2015-2020 the fiat-crypto authors (see the AUTHORS file)
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     1. Redistributions of source code must retain the above copyright
+//        notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package field_poly1305
+
+// This file provides arithmetic on the field Z/(2^130 - 5) using
+// unsaturated 64-bit integer arithmetic.  It is derived primarily
+// from the machine-generated Golang output from the fiat-crypto project.
+//
+// While the base implementation is provably correct, this implementation
+// makes no such claims as the port and optimizations were done by hand.
+// At some point, it may be worth adding support to fiat-crypto for
+// generating Odin output.
+
+import fiat "core:crypto/_fiat"
+import "core:math/bits"
+
+Loose_Field_Element :: distinct [3]u64
+Tight_Field_Element :: distinct [3]u64
+
+_addcarryx_u44 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((u64(arg1) + arg2) + arg3)
+	x2 := (x1 & 0xfffffffffff)
+	x3 := fiat.u1((x1 >> 44))
+	out1 = x2
+	out2 = x3
+	return
+}
+
+_subborrowx_u44 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((i64(arg2) - i64(arg1)) - i64(arg3))
+	x2 := fiat.i1((x1 >> 44))
+	x3 := (u64(x1) & 0xfffffffffff)
+	out1 = x3
+	out2 = (0x0 - fiat.u1(x2))
+	return
+}
+
+_addcarryx_u43 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((u64(arg1) + arg2) + arg3)
+	x2 := (x1 & 0x7ffffffffff)
+	x3 := fiat.u1((x1 >> 43))
+	out1 = x2
+	out2 = x3
+	return
+}
+
+_subborrowx_u43 :: #force_inline proc "contextless" (arg1: fiat.u1, arg2, arg3: u64) -> (out1: u64, out2: fiat.u1) {
+	x1 := ((i64(arg2) - i64(arg1)) - i64(arg3))
+	x2 := fiat.i1((x1 >> 43))
+	x3 := (u64(x1) & 0x7ffffffffff)
+	out1 = x3
+	out2 = (0x0 - fiat.u1(x2))
+	return
+}
+
+fe_carry_mul :: proc (out1: ^Tight_Field_Element, arg1, arg2: ^Loose_Field_Element) {
+	x2, x1 := bits.mul_u64(arg1[2], (arg2[2] * 0x5))
+	x4, x3 := bits.mul_u64(arg1[2], (arg2[1] * 0xa))
+	x6, x5 := bits.mul_u64(arg1[1], (arg2[2] * 0xa))
+	x8, x7 := bits.mul_u64(arg1[2], arg2[0])
+	x10, x9 := bits.mul_u64(arg1[1], (arg2[1] * 0x2))
+	x12, x11 := bits.mul_u64(arg1[1], arg2[0])
+	x14, x13 := bits.mul_u64(arg1[0], arg2[2])
+	x16, x15 := bits.mul_u64(arg1[0], arg2[1])
+	x18, x17 := bits.mul_u64(arg1[0], arg2[0])
+	x19, x20 := bits.add_u64(x5, x3, u64(0x0))
+	x21, _ := bits.add_u64(x6, x4, u64(fiat.u1(x20)))
+	x23, x24 := bits.add_u64(x17, x19, u64(0x0))
+	x25, _ := bits.add_u64(x18, x21, u64(fiat.u1(x24)))
+	x27 := ((x23 >> 44) | ((x25 << 20) & 0xffffffffffffffff))
+	x28 := (x23 & 0xfffffffffff)
+	x29, x30 := bits.add_u64(x9, x7, u64(0x0))
+	x31, _ := bits.add_u64(x10, x8, u64(fiat.u1(x30)))
+	x33, x34 := bits.add_u64(x13, x29, u64(0x0))
+	x35, _ := bits.add_u64(x14, x31, u64(fiat.u1(x34)))
+	x37, x38 := bits.add_u64(x11, x1, u64(0x0))
+	x39, _ := bits.add_u64(x12, x2, u64(fiat.u1(x38)))
+	x41, x42 := bits.add_u64(x15, x37, u64(0x0))
+	x43, _ := bits.add_u64(x16, x39, u64(fiat.u1(x42)))
+	x45, x46 := bits.add_u64(x27, x41, u64(0x0))
+	x47 := (u64(fiat.u1(x46)) + x43)
+	x48 := ((x45 >> 43) | ((x47 << 21) & 0xffffffffffffffff))
+	x49 := (x45 & 0x7ffffffffff)
+	x50, x51 := bits.add_u64(x48, x33, u64(0x0))
+	x52 := (u64(fiat.u1(x51)) + x35)
+	x53 := ((x50 >> 43) | ((x52 << 21) & 0xffffffffffffffff))
+	x54 := (x50 & 0x7ffffffffff)
+	x55 := (x53 * 0x5)
+	x56 := (x28 + x55)
+	x57 := (x56 >> 44)
+	x58 := (x56 & 0xfffffffffff)
+	x59 := (x57 + x49)
+	x60 := fiat.u1((x59 >> 43))
+	x61 := (x59 & 0x7ffffffffff)
+	x62 := (u64(x60) + x54)
+	out1[0] = x58
+	out1[1] = x61
+	out1[2] = x62
+}
+
+fe_carry_square :: proc (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	x1 := (arg1[2] * 0x5)
+	x2 := (x1 * 0x2)
+	x3 := (arg1[2] * 0x2)
+	x4 := (arg1[1] * 0x2)
+	x6, x5 := bits.mul_u64(arg1[2], x1)
+	x8, x7 := bits.mul_u64(arg1[1], (x2 * 0x2))
+	x10, x9 := bits.mul_u64(arg1[1], (arg1[1] * 0x2))
+	x12, x11 := bits.mul_u64(arg1[0], x3)
+	x14, x13 := bits.mul_u64(arg1[0], x4)
+	x16, x15 := bits.mul_u64(arg1[0], arg1[0])
+	x17, x18 := bits.add_u64(x15, x7, u64(0x0))
+	x19, _ := bits.add_u64(x16, x8, u64(fiat.u1(x18)))
+	x21 := ((x17 >> 44) | ((x19 << 20) & 0xffffffffffffffff))
+	x22 := (x17 & 0xfffffffffff)
+	x23, x24 := bits.add_u64(x11, x9, u64(0x0))
+	x25, _ := bits.add_u64(x12, x10, u64(fiat.u1(x24)))
+	x27, x28 := bits.add_u64(x13, x5, u64(0x0))
+	x29, _ := bits.add_u64(x14, x6, u64(fiat.u1(x28)))
+	x31, x32 := bits.add_u64(x21, x27, u64(0x0))
+	x33 := (u64(fiat.u1(x32)) + x29)
+	x34 := ((x31 >> 43) | ((x33 << 21) & 0xffffffffffffffff))
+	x35 := (x31 & 0x7ffffffffff)
+	x36, x37 := bits.add_u64(x34, x23, u64(0x0))
+	x38 := (u64(fiat.u1(x37)) + x25)
+	x39 := ((x36 >> 43) | ((x38 << 21) & 0xffffffffffffffff))
+	x40 := (x36 & 0x7ffffffffff)
+	x41 := (x39 * 0x5)
+	x42 := (x22 + x41)
+	x43 := (x42 >> 44)
+	x44 := (x42 & 0xfffffffffff)
+	x45 := (x43 + x35)
+	x46 := fiat.u1((x45 >> 43))
+	x47 := (x45 & 0x7ffffffffff)
+	x48 := (u64(x46) + x40)
+	out1[0] = x44
+	out1[1] = x47
+	out1[2] = x48
+}
+
+fe_carry :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^Loose_Field_Element) {
+	x1 := arg1[0]
+	x2 := ((x1 >> 44) + arg1[1])
+	x3 := ((x2 >> 43) + arg1[2])
+	x4 := ((x1 & 0xfffffffffff) + ((x3 >> 43) * 0x5))
+	x5 := (u64(fiat.u1((x4 >> 44))) + (x2 & 0x7ffffffffff))
+	x6 := (x4 & 0xfffffffffff)
+	x7 := (x5 & 0x7ffffffffff)
+	x8 := (u64(fiat.u1((x5 >> 43))) + (x3 & 0x7ffffffffff))
+	out1[0] = x6
+	out1[1] = x7
+	out1[2] = x8
+}
+
+fe_add :: proc "contextless" (out1: ^Loose_Field_Element, arg1, arg2: ^Tight_Field_Element) {
+	x1 := (arg1[0] + arg2[0])
+	x2 := (arg1[1] + arg2[1])
+	x3 := (arg1[2] + arg2[2])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+fe_sub :: proc "contextless" (out1: ^Loose_Field_Element, arg1, arg2: ^Tight_Field_Element) {
+	x1 := ((0x1ffffffffff6 + arg1[0]) - arg2[0])
+	x2 := ((0xffffffffffe + arg1[1]) - arg2[1])
+	x3 := ((0xffffffffffe + arg1[2]) - arg2[2])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+fe_opp :: proc "contextless" (out1: ^Loose_Field_Element, arg1: ^Tight_Field_Element) {
+	x1 := (0x1ffffffffff6 - arg1[0])
+	x2 := (0xffffffffffe - arg1[1])
+	x3 := (0xffffffffffe - arg1[2])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+fe_cond_assign :: proc "contextless" (out1, arg1: ^Tight_Field_Element, arg2: bool) {
+	x1 := fiat.cmovznz_u64(fiat.u1(arg2), out1[0], arg1[0])
+	x2 := fiat.cmovznz_u64(fiat.u1(arg2), out1[1], arg1[1])
+	x3 := fiat.cmovznz_u64(fiat.u1(arg2), out1[2], arg1[2])
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+fe_to_bytes :: proc "contextless" (out1: ^[32]byte, arg1: ^Tight_Field_Element) {
+	x1, x2 := _subborrowx_u44(0x0, arg1[0], 0xffffffffffb)
+	x3, x4 := _subborrowx_u43(x2, arg1[1], 0x7ffffffffff)
+	x5, x6 := _subborrowx_u43(x4, arg1[2], 0x7ffffffffff)
+	x7 := fiat.cmovznz_u64(x6, u64(0x0), 0xffffffffffffffff)
+	x8, x9 := _addcarryx_u44(0x0, x1, (x7 & 0xffffffffffb))
+	x10, x11 := _addcarryx_u43(x9, x3, (x7 & 0x7ffffffffff))
+	x12, _ := _addcarryx_u43(x11, x5, (x7 & 0x7ffffffffff))
+	x14 := (x12 << 7)
+	x15 := (x10 << 4)
+	x16 := (u8(x8) & 0xff)
+	x17 := (x8 >> 8)
+	x18 := (u8(x17) & 0xff)
+	x19 := (x17 >> 8)
+	x20 := (u8(x19) & 0xff)
+	x21 := (x19 >> 8)
+	x22 := (u8(x21) & 0xff)
+	x23 := (x21 >> 8)
+	x24 := (u8(x23) & 0xff)
+	x25 := u8((x23 >> 8))
+	x26 := (x15 + u64(x25))
+	x27 := (u8(x26) & 0xff)
+	x28 := (x26 >> 8)
+	x29 := (u8(x28) & 0xff)
+	x30 := (x28 >> 8)
+	x31 := (u8(x30) & 0xff)
+	x32 := (x30 >> 8)
+	x33 := (u8(x32) & 0xff)
+	x34 := (x32 >> 8)
+	x35 := (u8(x34) & 0xff)
+	x36 := u8((x34 >> 8))
+	x37 := (x14 + u64(x36))
+	x38 := (u8(x37) & 0xff)
+	x39 := (x37 >> 8)
+	x40 := (u8(x39) & 0xff)
+	x41 := (x39 >> 8)
+	x42 := (u8(x41) & 0xff)
+	x43 := (x41 >> 8)
+	x44 := (u8(x43) & 0xff)
+	x45 := (x43 >> 8)
+	x46 := (u8(x45) & 0xff)
+	x47 := (x45 >> 8)
+	x48 := (u8(x47) & 0xff)
+	x49 := u8((x47 >> 8))
+	out1[0] = x16
+	out1[1] = x18
+	out1[2] = x20
+	out1[3] = x22
+	out1[4] = x24
+	out1[5] = x27
+	out1[6] = x29
+	out1[7] = x31
+	out1[8] = x33
+	out1[9] = x35
+	out1[10] = x38
+	out1[11] = x40
+	out1[12] = x42
+	out1[13] = x44
+	out1[14] = x46
+	out1[15] = x48
+	out1[16] = x49
+}
+
+_fe_from_bytes :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^[32]byte) {
+	x1 := (u64(arg1[16]) << 41)
+	x2 := (u64(arg1[15]) << 33)
+	x3 := (u64(arg1[14]) << 25)
+	x4 := (u64(arg1[13]) << 17)
+	x5 := (u64(arg1[12]) << 9)
+	x6 := (u64(arg1[11]) * u64(0x2))
+	x7 := (u64(arg1[10]) << 36)
+	x8 := (u64(arg1[9]) << 28)
+	x9 := (u64(arg1[8]) << 20)
+	x10 := (u64(arg1[7]) << 12)
+	x11 := (u64(arg1[6]) << 4)
+	x12 := (u64(arg1[5]) << 40)
+	x13 := (u64(arg1[4]) << 32)
+	x14 := (u64(arg1[3]) << 24)
+	x15 := (u64(arg1[2]) << 16)
+	x16 := (u64(arg1[1]) << 8)
+	x17 := arg1[0]
+	x18 := (x16 + u64(x17))
+	x19 := (x15 + x18)
+	x20 := (x14 + x19)
+	x21 := (x13 + x20)
+	x22 := (x12 + x21)
+	x23 := (x22 & 0xfffffffffff)
+	x24 := u8((x22 >> 44))
+	x25 := (x11 + u64(x24))
+	x26 := (x10 + x25)
+	x27 := (x9 + x26)
+	x28 := (x8 + x27)
+	x29 := (x7 + x28)
+	x30 := (x29 & 0x7ffffffffff)
+	x31 := fiat.u1((x29 >> 43))
+	x32 := (x6 + u64(x31))
+	x33 := (x5 + x32)
+	x34 := (x4 + x33)
+	x35 := (x3 + x34)
+	x36 := (x2 + x35)
+	x37 := (x1 + x36)
+	out1[0] = x23
+	out1[1] = x30
+	out1[2] = x37
+}
+
+fe_relax :: proc "contextless" (out1: ^Loose_Field_Element, arg1: ^Tight_Field_Element) {
+	x1 := arg1[0]
+	x2 := arg1[1]
+	x3 := arg1[2]
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+// The following routines were added by hand, and do not come from fiat-crypto.
+
+fe_zero :: proc "contextless" (out1: ^Tight_Field_Element) {
+	out1[0] = 0
+	out1[1] = 0
+	out1[2] = 0
+}
+
+fe_set :: #force_inline proc "contextless" (out1, arg1: ^Tight_Field_Element) {
+	x1 := arg1[0]
+	x2 := arg1[1]
+	x3 := arg1[2]
+	out1[0] = x1
+	out1[1] = x2
+	out1[2] = x3
+}
+
+fe_cond_swap :: proc "contextless" (out1, out2: ^Tight_Field_Element, arg1: bool) {
+	mask := -u64(arg1)
+	x := (out1[0] ~ out2[0]) & mask
+	x1, y1 := out1[0] ~ x, out2[0] ~ x
+	x = (out1[1] ~ out2[1]) & mask
+	x2, y2 := out1[1] ~ x, out2[1] ~ x
+	x = (out1[2] ~ out2[2]) & mask
+	x3, y3 := out1[2] ~ x, out2[2] ~ x
+	out1[0], out2[0] = x1, y1
+	out1[1], out2[1] = x2, y2
+	out1[2], out2[2] = x3, y3
+}
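
The carrying convention above is fiat-crypto's Tight/Loose split: fe_add, fe_sub, and fe_opp return Loose_Field_Element values whose 44/43/43-bit limbs may have grown past their bounds, and the carrying routines (fe_carry, fe_carry_square) fold the excess back in modulo 2^130 - 5, the Poly1305 field. A hypothetical helper, not part of this commit, sketching how the two element kinds compose:

    // Computes (a + b)^2 in the field.  fe_add yields a Loose element;
    // fe_carry_square squares it and restores the Tight limb bounds.
    fe_square_of_sum :: proc (out: ^Tight_Field_Element, a, b: ^Tight_Field_Element) {
        sum: Loose_Field_Element = ---
        fe_add(&sum, a, b)         // Tight x Tight -> Loose
        fe_carry_square(out, &sum) // Loose -> Tight (reduced mod 2^130 - 5)
    }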

+ 2 - 2
core/crypto/_sha3/_sha3.odin

@@ -52,7 +52,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
    t: u64       = ---
    bc: [5]u64   = ---

-    when ODIN_ENDIAN != "little" {
+    when ODIN_ENDIAN != .Little {
        v: uintptr = ---
        for i = 0; i < 25; i += 1 {
            v := uintptr(&st[i])
@@ -98,7 +98,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
        st[0] ~= keccakf_rndc[r]
    }

-    when ODIN_ENDIAN != "little" {
+    when ODIN_ENDIAN != .Little {
        for i = 0; i < 25; i += 1 {
            v = uintptr(&st[i])
            t = st[i]

+ 117 - 28
core/crypto/blake/blake.odin

@@ -17,16 +17,21 @@ import "core:io"
    High level API
*/

+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc "contextless" (data: string) -> [28]byte {
+hash_string_224 :: proc "contextless" (data: string) -> [DIGEST_SIZE_224]byte {
    return hash_bytes_224(transmute([]byte)(data))
}

// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
    ctx: Blake256_Context
    ctx.is224 = true
    init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
    return hash
}

+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake256_Context
+    ctx.is224 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
    ctx: Blake256_Context
    ctx.is224 = true
    init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {

// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
    if !load_at_once {
        return hash_stream_224(os.stream_from_handle(hd))
    } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
            return hash_bytes_224(buf[:]), ok
        }
    }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
}

hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
    hash_file_224,
    hash_bytes_224,
    hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
}

// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc "contextless" (data: string) -> [32]byte {
+hash_string_256 :: proc "contextless" (data: string) -> [DIGEST_SIZE_256]byte {
    return hash_bytes_256(transmute([]byte)(data))
}

// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Blake256_Context
    ctx.is224 = false
    init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
    return hash
}

+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake256_Context
+    ctx.is224 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Blake256_Context
    ctx.is224 = false
    init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {

// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
    if !load_at_once {
        return hash_stream_256(os.stream_from_handle(hd))
    } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
            return hash_bytes_256(buf[:]), ok
        }
    }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
}

hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
    hash_file_256,
    hash_bytes_256,
    hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
}

// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc "contextless" (data: string) -> [48]byte {
+hash_string_384 :: proc "contextless" (data: string) -> [DIGEST_SIZE_384]byte {
    return hash_bytes_384(transmute([]byte)(data))
}

// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
    ctx: Blake512_Context
    ctx.is384 = true
    init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
    return hash
}

+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake512_Context
+    ctx.is384 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
    ctx: Blake512_Context
    ctx.is384 = true
    init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {

// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
    if !load_at_once {
        return hash_stream_384(os.stream_from_handle(hd))
    } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
            return hash_bytes_384(buf[:]), ok
        }
    }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
}

hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
    hash_file_384,
    hash_bytes_384,
    hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
}

// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc "contextless" (data: string) -> [64]byte {
+hash_string_512 :: proc "contextless" (data: string) -> [DIGEST_SIZE_512]byte {
    return hash_bytes_512(transmute([]byte)(data))
}

// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
    ctx: Blake512_Context
    ctx.is384 = false
    init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
    return hash
}

+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake512_Context
+    ctx.is384 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
    ctx: Blake512_Context
    ctx.is384 = false
    init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {

// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
    if !load_at_once {
        return hash_stream_512(os.stream_from_handle(hd))
    } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
            return hash_bytes_512(buf[:]), ok
        }
    }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
}

hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
    hash_file_512,
    hash_bytes_512,
    hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
}

/*

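A usage sketch for the new *_to_buffer variants above (hypothetical example, assuming the package is imported as core:crypto/blake); the same pattern applies to the blake2b, blake2s, gost, and groestl packages below:

    package example

    import "core:crypto/blake"
    import "core:fmt"

    main :: proc() {
        digest: [blake.DIGEST_SIZE_256]byte
        // The destination must be at least DIGEST_SIZE_256 bytes, or the
        // assert inside hash_bytes_to_buffer_256 fires.
        blake.hash_string_to_buffer_256("odin", digest[:])
        fmt.printf("%x\n", digest)
    }
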
+ 33 - 7
core/crypto/blake2b/blake2b.odin

@@ -20,16 +20,18 @@ import "../_blake2"
    High level API
*/

+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
    return hash_bytes(transmute([]byte)(data))
}

// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
    ctx: _blake2.Blake2b_Context
    cfg: _blake2.Blake2_Config
    cfg.size = _blake2.BLAKE2B_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
    return hash
}

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: _blake2.Blake2b_Context
+    cfg: _blake2.Blake2_Config
+    cfg.size = _blake2.BLAKE2B_SIZE
+    ctx.cfg  = cfg
+    _blake2.init(&ctx)
+    _blake2.update(&ctx, data)
+    _blake2.final(&ctx, hash)
+}
+
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
    ctx: _blake2.Blake2b_Context
    cfg: _blake2.Blake2_Config
    cfg.size = _blake2.BLAKE2B_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {

// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
    if !load_at_once {
        return hash_stream(os.stream_from_handle(hd))
    } else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
            return hash_bytes(buf[:]), ok
        }
    }
-    return [64]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
}

hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
    hash_file,
    hash_bytes,
    hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
}

/*

+ 33 - 7
core/crypto/blake2s/blake2s.odin

@@ -20,16 +20,18 @@ import "../_blake2"
    High level API
*/

+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
    return hash_bytes(transmute([]byte)(data))
}

// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
    ctx: _blake2.Blake2s_Context
    cfg: _blake2.Blake2_Config
    cfg.size = _blake2.BLAKE2S_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
    return hash
}

+
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: _blake2.Blake2s_Context
+    cfg: _blake2.Blake2_Config
+    cfg.size = _blake2.BLAKE2S_SIZE
+    ctx.cfg  = cfg
+    _blake2.init(&ctx)
+    _blake2.update(&ctx, data)
+    _blake2.final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
    ctx: _blake2.Blake2s_Context
    cfg: _blake2.Blake2_Config
    cfg.size = _blake2.BLAKE2S_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {

// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
    if !load_at_once {
        return hash_stream(os.stream_from_handle(hd))
    } else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
            return hash_bytes(buf[:]), ok
        }
    }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
}

hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
    hash_file,
    hash_bytes,
    hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
}

/*

+ 581 - 0
core/crypto/chacha20/chacha20.odin

@@ -0,0 +1,581 @@
+package chacha20
+
+import "core:crypto/util"
+import "core:math/bits"
+import "core:mem"
+
+KEY_SIZE :: 32
+NONCE_SIZE :: 12
+XNONCE_SIZE :: 24
+
+_MAX_CTR_IETF :: 0xffffffff
+
+_BLOCK_SIZE :: 64
+_STATE_SIZE_U32 :: 16
+_ROUNDS :: 20
+
+_SIGMA_0 : u32 : 0x61707865
+_SIGMA_1 : u32 : 0x3320646e
+_SIGMA_2 : u32 : 0x79622d32
+_SIGMA_3 : u32 : 0x6b206574
+
+Context :: struct {
+	_s: [_STATE_SIZE_U32]u32,
+
+	_buffer: [_BLOCK_SIZE]byte,
+	_off: int,
+
+	_is_ietf_flavor: bool,
+	_is_initialized: bool,
+}
+
+init :: proc (ctx: ^Context, key, nonce: []byte) {
+	if len(key) != KEY_SIZE {
+		panic("crypto/chacha20: invalid ChaCha20 key size")
+	}
+	if n_len := len(nonce); n_len != NONCE_SIZE && n_len != XNONCE_SIZE {
+		panic("crypto/chacha20: invalid (X)ChaCha20 nonce size")
+	}
+
+	k, n := key, nonce
+
+	// Derive the XChaCha20 subkey and sub-nonce via HChaCha20.
+	is_xchacha := len(nonce) == XNONCE_SIZE
+	if is_xchacha {
+		sub_key := ctx._buffer[:KEY_SIZE]
+		_hchacha20(sub_key, k, n)
+		k = sub_key
+		n = n[16:24]
+	}
+
+	ctx._s[0] = _SIGMA_0
+	ctx._s[1] = _SIGMA_1
+	ctx._s[2] = _SIGMA_2
+	ctx._s[3] = _SIGMA_3
+	ctx._s[4] = util.U32_LE(k[0:4])
+	ctx._s[5] = util.U32_LE(k[4:8])
+	ctx._s[6] = util.U32_LE(k[8:12])
+	ctx._s[7] = util.U32_LE(k[12:16])
+	ctx._s[8] = util.U32_LE(k[16:20])
+	ctx._s[9] = util.U32_LE(k[20:24])
+	ctx._s[10] = util.U32_LE(k[24:28])
+	ctx._s[11] = util.U32_LE(k[28:32])
+	ctx._s[12] = 0
+	if !is_xchacha {
+		ctx._s[13] = util.U32_LE(n[0:4])
+		ctx._s[14] = util.U32_LE(n[4:8])
+		ctx._s[15] = util.U32_LE(n[8:12])
+	} else {
+		ctx._s[13] = 0
+		ctx._s[14] = util.U32_LE(n[0:4])
+		ctx._s[15] = util.U32_LE(n[4:8])
+
+		// The sub-key is stored in the keystream buffer.  While
+		// this will be overwritten in most circumstances, explicitly
+		// clear it out early.
+		mem.zero_explicit(&ctx._buffer, KEY_SIZE)
+	}
+
+	ctx._off = _BLOCK_SIZE
+	ctx._is_ietf_flavor = !is_xchacha
+	ctx._is_initialized = true
+}
+
+seek :: proc (ctx: ^Context, block_nr: u64) {
+	assert(ctx._is_initialized)
+
+	if ctx._is_ietf_flavor {
+		if block_nr > _MAX_CTR_IETF {
+			panic("crypto/chacha20: attempted to seek past maximum counter")
+		}
+	} else {
+		ctx._s[13] = u32(block_nr >> 32)
+	}
+	ctx._s[12] = u32(block_nr)
+	ctx._off = _BLOCK_SIZE
+}
+
+xor_bytes :: proc (ctx: ^Context, dst, src: []byte) {
+	assert(ctx._is_initialized)
+
+	// TODO: Enforcing that dst and src alias exactly or not at all
+	// is a good idea, though odd aliasing should be extremely uncommon.
+
+	src, dst := src, dst
+	if dst_len := len(dst); dst_len < len(src) {
+		src = src[:dst_len]
+	}
+
+	for remaining := len(src); remaining > 0; {
+		// Process multiple blocks at once
+		if ctx._off == _BLOCK_SIZE {
+			if nr_blocks := remaining / _BLOCK_SIZE; nr_blocks > 0 {
+				direct_bytes := nr_blocks * _BLOCK_SIZE
+				_do_blocks(ctx, dst, src, nr_blocks)
+				remaining -= direct_bytes
+				if remaining == 0 {
+					return
+				}
+				dst = dst[direct_bytes:]
+				src = src[direct_bytes:]
+			}
+
+			// If there is a partial block, generate and buffer 1 block
+			// worth of keystream.
+			_do_blocks(ctx, ctx._buffer[:], nil, 1)
+			ctx._off = 0
+		}
+
+		// Process partial blocks from the buffered keystream.
+		to_xor := min(_BLOCK_SIZE - ctx._off, remaining)
+		buffered_keystream := ctx._buffer[ctx._off:]
+		for i := 0; i < to_xor; i = i + 1 {
+			dst[i] = buffered_keystream[i] ~ src[i]
+		}
+		ctx._off += to_xor
+		dst = dst[to_xor:]
+		src = src[to_xor:]
+		remaining -= to_xor
+	}
+}
+
+keystream_bytes :: proc (ctx: ^Context, dst: []byte) {
+	assert(ctx._is_initialized)
+
+	dst := dst
+	for remaining := len(dst); remaining > 0; {
+		// Process multiple blocks at once
+		if ctx._off == _BLOCK_SIZE {
+			if nr_blocks := remaining / _BLOCK_SIZE; nr_blocks > 0 {
+				direct_bytes := nr_blocks * _BLOCK_SIZE
+				_do_blocks(ctx, dst, nil, nr_blocks)
+				remaining -= direct_bytes
+				if remaining == 0 {
+					return
+				}
+				dst = dst[direct_bytes:]
+			}
+
+			// If there is a partial block, generate and buffer 1 block
+			// worth of keystream.
+			_do_blocks(ctx, ctx._buffer[:], nil, 1)
+			ctx._off = 0
+		}
+
+		// Process partial blocks from the buffered keystream.
+		to_copy := min(_BLOCK_SIZE - ctx._off, remaining)
+		buffered_keystream := ctx._buffer[ctx._off:]
+		copy(dst[:to_copy], buffered_keystream[:to_copy])
+		ctx._off += to_copy
+		dst = dst[to_copy:]
+		remaining -= to_copy
+	}
+}
+
+reset :: proc (ctx: ^Context) {
+	mem.zero_explicit(&ctx._s, size_of(ctx._s))
+	mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
+
+	ctx._is_initialized = false
+}
+
+_do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
+	// Enforce the maximum consumed keystream per nonce.
+	//
+	// While all modern "standard" definitions of ChaCha20 use
+	// the IETF 32-bit counter, for XChaCha20 most common
+	// implementations allow for a 64-bit counter.
+	//
+	// Honestly, the answer here is "use an MRAE primitive", but
+	// go with common practice in the case of XChaCha20.
+	if ctx._is_ietf_flavor {
+		if u64(ctx._s[12]) + u64(nr_blocks) > 0xffffffff {
+			panic("crypto/chacha20: maximum ChaCha20 keystream per nonce reached")
+		}
+	} else {
+		ctr := (u64(ctx._s[13]) << 32) | u64(ctx._s[12])
+		if _, carry := bits.add_u64(ctr, u64(nr_blocks), 0); carry != 0 {
+			panic("crypto/chacha20: maximum XChaCha20 keystream per nonce reached")
+		}
+	}
+
+	dst, src := dst, src
+	x := &ctx._s
+	for n := 0; n < nr_blocks; n = n + 1 {
+		x0, x1, x2, x3 := _SIGMA_0, _SIGMA_1, _SIGMA_2, _SIGMA_3
+		x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]
+
+		for i := _ROUNDS; i > 0; i = i - 2 {
+			// Even when forcing inlining, manually inlining all of
+			// these is decently faster.
+
+			// quarterround(x, 0, 4, 8, 12)
+			x0 += x4
+			x12 ~= x0
+			x12 = util.ROTL32(x12, 16)
+			x8 += x12
+			x4 ~= x8
+			x4 = util.ROTL32(x4, 12)
+			x0 += x4
+			x12 ~= x0
+			x12 = util.ROTL32(x12, 8)
+			x8 += x12
+			x4 ~= x8
+			x4 = util.ROTL32(x4, 7)
+
+			// quarterround(x, 1, 5, 9, 13)
+			x1 += x5
+			x13 ~= x1
+			x13 = util.ROTL32(x13, 16)
+			x9 += x13
+			x5 ~= x9
+			x5 = util.ROTL32(x5, 12)
+			x1 += x5
+			x13 ~= x1
+			x13 = util.ROTL32(x13, 8)
+			x9 += x13
+			x5 ~= x9
+			x5 = util.ROTL32(x5, 7)
+
+			// quarterround(x, 2, 6, 10, 14)
+			x2 += x6
+			x14 ~= x2
+			x14 = util.ROTL32(x14, 16)
+			x10 += x14
+			x6 ~= x10
+			x6 = util.ROTL32(x6, 12)
+			x2 += x6
+			x14 ~= x2
+			x14 = util.ROTL32(x14, 8)
+			x10 += x14
+			x6 ~= x10
+			x6 = util.ROTL32(x6, 7)
+
+			// quarterround(x, 3, 7, 11, 15)
+			x3 += x7
+			x15 ~= x3
+			x15 = util.ROTL32(x15, 16)
+			x11 += x15
+			x7 ~= x11
+			x7 = util.ROTL32(x7, 12)
+			x3 += x7
+			x15 ~= x3
+			x15 = util.ROTL32(x15, 8)
+			x11 += x15
+			x7 ~= x11
+			x7 = util.ROTL32(x7, 7)
+
+			// quarterround(x, 0, 5, 10, 15)
+			x0 += x5
+			x15 ~= x0
+			x15 = util.ROTL32(x15, 16)
+			x10 += x15
+			x5 ~= x10
+			x5 = util.ROTL32(x5, 12)
+			x0 += x5
+			x15 ~= x0
+			x15 = util.ROTL32(x15, 8)
+			x10 += x15
+			x5 ~= x10
+			x5 = util.ROTL32(x5, 7)
+
+			// quarterround(x, 1, 6, 11, 12)
+			x1 += x6
+			x12 ~= x1
+			x12 = util.ROTL32(x12, 16)
+			x11 += x12
+			x6 ~= x11
+			x6 = util.ROTL32(x6, 12)
+			x1 += x6
+			x12 ~= x1
+			x12 = util.ROTL32(x12, 8)
+			x11 += x12
+			x6 ~= x11
+			x6 = util.ROTL32(x6, 7)
+
+			// quarterround(x, 2, 7, 8, 13)
+			x2 += x7
+			x13 ~= x2
+			x13 = util.ROTL32(x13, 16)
+			x8 += x13
+			x7 ~= x8
+			x7 = util.ROTL32(x7, 12)
+			x2 += x7
+			x13 ~= x2
+			x13 = util.ROTL32(x13, 8)
+			x8 += x13
+			x7 ~= x8
+			x7 = util.ROTL32(x7, 7)
+
+			// quarterround(x, 3, 4, 9, 14)
+			x3 += x4
+			x14 ~= x3
+			x14 = util.ROTL32(x14, 16)
+			x9 += x14
+			x4 ~= x9
+			x4 = util.ROTL32(x4, 12)
+			x3 += x4
+			x14 ~= x3
+			x14 = util.ROTL32(x14, 8)
+			x9 += x14
+			x4 ~= x9
+			x4 = util.ROTL32(x4, 7)
+		}
+
+		x0 += _SIGMA_0
+		x1 += _SIGMA_1
+		x2 += _SIGMA_2
+		x3 += _SIGMA_3
+		x4 += x[4]
+		x5 += x[5]
+		x6 += x[6]
+		x7 += x[7]
+		x8 += x[8]
+		x9 += x[9]
+		x10 += x[10]
+		x11 += x[11]
+		x12 += x[12]
+		x13 += x[13]
+		x14 += x[14]
+		x15 += x[15]
+
+		// While the "correct" answer to getting more performance out of
+		// this is "use vector operations", support for that is currently
+		// a work in progress/to be designed.
+		//
+		// Until dedicated assembly can be written leverage the fact that
+		// the callers of this routine ensure that src/dst are valid.
+
+		when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
+			// util.PUT_U32_LE/util.U32_LE are not required on little-endian
+			// systems that also happen to not be strict about aligned
+			// memory access.
+
+			dst_p := transmute(^[16]u32)(&dst[0])
+			if src != nil {
+				src_p := transmute(^[16]u32)(&src[0])
+				dst_p[0] = src_p[0] ~ x0
+				dst_p[1] = src_p[1] ~ x1
+				dst_p[2] = src_p[2] ~ x2
+				dst_p[3] = src_p[3] ~ x3
+				dst_p[4] = src_p[4] ~ x4
+				dst_p[5] = src_p[5] ~ x5
+				dst_p[6] = src_p[6] ~ x6
+				dst_p[7] = src_p[7] ~ x7
+				dst_p[8] = src_p[8] ~ x8
+				dst_p[9] = src_p[9] ~ x9
+				dst_p[10] = src_p[10] ~ x10
+				dst_p[11] = src_p[11] ~ x11
+				dst_p[12] = src_p[12] ~ x12
+				dst_p[13] = src_p[13] ~ x13
+				dst_p[14] = src_p[14] ~ x14
+				dst_p[15] = src_p[15] ~ x15
+				src = src[_BLOCK_SIZE:]
+			} else {
+				dst_p[0] = x0
+				dst_p[1] = x1
+				dst_p[2] = x2
+				dst_p[3] = x3
+				dst_p[4] = x4
+				dst_p[5] = x5
+				dst_p[6] = x6
+				dst_p[7] = x7
+				dst_p[8] = x8
+				dst_p[9] = x9
+				dst_p[10] = x10
+				dst_p[11] = x11
+				dst_p[12] = x12
+				dst_p[13] = x13
+				dst_p[14] = x14
+				dst_p[15] = x15
+			}
+			dst = dst[_BLOCK_SIZE:]
+		} else {
+			#no_bounds_check {
+				if src != nil {
+					util.PUT_U32_LE(dst[0:4], util.U32_LE(src[0:4]) ~ x0)
+					util.PUT_U32_LE(dst[4:8], util.U32_LE(src[4:8]) ~ x1)
+					util.PUT_U32_LE(dst[8:12], util.U32_LE(src[8:12]) ~ x2)
+					util.PUT_U32_LE(dst[12:16], util.U32_LE(src[12:16]) ~ x3)
+					util.PUT_U32_LE(dst[16:20], util.U32_LE(src[16:20]) ~ x4)
+					util.PUT_U32_LE(dst[20:24], util.U32_LE(src[20:24]) ~ x5)
+					util.PUT_U32_LE(dst[24:28], util.U32_LE(src[24:28]) ~ x6)
+					util.PUT_U32_LE(dst[28:32], util.U32_LE(src[28:32]) ~ x7)
+					util.PUT_U32_LE(dst[32:36], util.U32_LE(src[32:36]) ~ x8)
+					util.PUT_U32_LE(dst[36:40], util.U32_LE(src[36:40]) ~ x9)
+					util.PUT_U32_LE(dst[40:44], util.U32_LE(src[40:44]) ~ x10)
+					util.PUT_U32_LE(dst[44:48], util.U32_LE(src[44:48]) ~ x11)
+					util.PUT_U32_LE(dst[48:52], util.U32_LE(src[48:52]) ~ x12)
+					util.PUT_U32_LE(dst[52:56], util.U32_LE(src[52:56]) ~ x13)
+					util.PUT_U32_LE(dst[56:60], util.U32_LE(src[56:60]) ~ x14)
+					util.PUT_U32_LE(dst[60:64], util.U32_LE(src[60:64]) ~ x15)
+					src = src[_BLOCK_SIZE:]
+				} else {
+					util.PUT_U32_LE(dst[0:4], x0)
+					util.PUT_U32_LE(dst[4:8], x1)
+					util.PUT_U32_LE(dst[8:12], x2)
+					util.PUT_U32_LE(dst[12:16], x3)
+					util.PUT_U32_LE(dst[16:20], x4)
+					util.PUT_U32_LE(dst[20:24], x5)
+					util.PUT_U32_LE(dst[24:28], x6)
+					util.PUT_U32_LE(dst[28:32], x7)
+					util.PUT_U32_LE(dst[32:36], x8)
+					util.PUT_U32_LE(dst[36:40], x9)
+					util.PUT_U32_LE(dst[40:44], x10)
+					util.PUT_U32_LE(dst[44:48], x11)
+					util.PUT_U32_LE(dst[48:52], x12)
+					util.PUT_U32_LE(dst[52:56], x13)
+					util.PUT_U32_LE(dst[56:60], x14)
+					util.PUT_U32_LE(dst[60:64], x15)
+				}
+				dst = dst[_BLOCK_SIZE:]
+			}
+		}
+
+		// Increment the counter.  Overflow checking is done upon
+		// entry into the routine, so a 64-bit increment safely
+		// covers both cases.
+		new_ctr := ((u64(ctx._s[13]) << 32) | u64(ctx._s[12])) + 1
+		x[12] = u32(new_ctr)
+		x[13] = u32(new_ctr >> 32)
+	}
+}
+
+_hchacha20 :: proc (dst, key, nonce: []byte) {
+	x0, x1, x2, x3 := _SIGMA_0, _SIGMA_1, _SIGMA_2, _SIGMA_3
+	x4 := util.U32_LE(key[0:4])
+	x5 := util.U32_LE(key[4:8])
+	x6 := util.U32_LE(key[8:12])
+	x7 := util.U32_LE(key[12:16])
+	x8 := util.U32_LE(key[16:20])
+	x9 := util.U32_LE(key[20:24])
+	x10 := util.U32_LE(key[24:28])
+	x11 := util.U32_LE(key[28:32])
+	x12 := util.U32_LE(nonce[0:4])
+	x13 := util.U32_LE(nonce[4:8])
+	x14 := util.U32_LE(nonce[8:12])
+	x15 := util.U32_LE(nonce[12:16])
+
+	for i := _ROUNDS; i > 0; i = i - 2 {
+		// quarterround(x, 0, 4, 8, 12)
+		x0 += x4
+		x12 ~= x0
+		x12 = util.ROTL32(x12, 16)
+		x8 += x12
+		x4 ~= x8
+		x4 = util.ROTL32(x4, 12)
+		x0 += x4
+		x12 ~= x0
+		x12 = util.ROTL32(x12, 8)
+		x8 += x12
+		x4 ~= x8
+		x4 = util.ROTL32(x4, 7)
+
+		// quarterround(x, 1, 5, 9, 13)
+		x1 += x5
+		x13 ~= x1
+		x13 = util.ROTL32(x13, 16)
+		x9 += x13
+		x5 ~= x9
+		x5 = util.ROTL32(x5, 12)
+		x1 += x5
+		x13 ~= x1
+		x13 = util.ROTL32(x13, 8)
+		x9 += x13
+		x5 ~= x9
+		x5 = util.ROTL32(x5, 7)
+
+		// quarterround(x, 2, 6, 10, 14)
+		x2 += x6
+		x14 ~= x2
+		x14 = util.ROTL32(x14, 16)
+		x10 += x14
+		x6 ~= x10
+		x6 = util.ROTL32(x6, 12)
+		x2 += x6
+		x14 ~= x2
+		x14 = util.ROTL32(x14, 8)
+		x10 += x14
+		x6 ~= x10
+		x6 = util.ROTL32(x6, 7)
+
+		// quarterround(x, 3, 7, 11, 15)
+		x3 += x7
+		x15 ~= x3
+		x15 = util.ROTL32(x15, 16)
+		x11 += x15
+		x7 ~= x11
+		x7 = util.ROTL32(x7, 12)
+		x3 += x7
+		x15 ~= x3
+		x15 = util.ROTL32(x15, 8)
+		x11 += x15
+		x7 ~= x11
+		x7 = util.ROTL32(x7, 7)
+
+		// quarterround(x, 0, 5, 10, 15)
+		x0 += x5
+		x15 ~= x0
+		x15 = util.ROTL32(x15, 16)
+		x10 += x15
+		x5 ~= x10
+		x5 = util.ROTL32(x5, 12)
+		x0 += x5
+		x15 ~= x0
+		x15 = util.ROTL32(x15, 8)
+		x10 += x15
+		x5 ~= x10
+		x5 = util.ROTL32(x5, 7)
+
+		// quarterround(x, 1, 6, 11, 12)
+		x1 += x6
+		x12 ~= x1
+		x12 = util.ROTL32(x12, 16)
+		x11 += x12
+		x6 ~= x11
+		x6 = util.ROTL32(x6, 12)
+		x1 += x6
+		x12 ~= x1
+		x12 = util.ROTL32(x12, 8)
+		x11 += x12
+		x6 ~= x11
+		x6 = util.ROTL32(x6, 7)
+
+		// quarterround(x, 2, 7, 8, 13)
+		x2 += x7
+		x13 ~= x2
+		x13 = util.ROTL32(x13, 16)
+		x8 += x13
+		x7 ~= x8
+		x7 = util.ROTL32(x7, 12)
+		x2 += x7
+		x13 ~= x2
+		x13 = util.ROTL32(x13, 8)
+		x8 += x13
+		x7 ~= x8
+		x7 = util.ROTL32(x7, 7)
+
+		// quarterround(x, 3, 4, 9, 14)
+		x3 += x4
+		x14 ~= x3
+		x14 = util.ROTL32(x14, 16)
+		x9 += x14
+		x4 ~= x9
+		x4 = util.ROTL32(x4, 12)
+		x3 += x4
+		x14 ~= x3
+		x14 = util.ROTL32(x14, 8)
+		x9 += x14
+		x4 ~= x9
+		x4 = util.ROTL32(x4, 7)
+	}
+
+	util.PUT_U32_LE(dst[0:4], x0)
+	util.PUT_U32_LE(dst[4:8], x1)
+	util.PUT_U32_LE(dst[8:12], x2)
+	util.PUT_U32_LE(dst[12:16], x3)
+	util.PUT_U32_LE(dst[16:20], x12)
+	util.PUT_U32_LE(dst[20:24], x13)
+	util.PUT_U32_LE(dst[24:28], x14)
+	util.PUT_U32_LE(dst[28:32], x15)
+}
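
A minimal encryption sketch against the API above (hypothetical example, assuming the import path core:crypto/chacha20; key and nonce generation elided):

    package example

    import "core:crypto/chacha20"

    main :: proc() {
        key: [chacha20.KEY_SIZE]byte     // in real use, from a CSPRNG
        nonce: [chacha20.NONCE_SIZE]byte // must never repeat under one key

        msg := "attack at dawn"
        plaintext := transmute([]byte)msg
        ciphertext := make([]byte, len(plaintext))

        ctx: chacha20.Context
        chacha20.init(&ctx, key[:], nonce[:])
        chacha20.xor_bytes(&ctx, ciphertext, plaintext) // encrypt
        chacha20.reset(&ctx)                            // sanitize key material
    }

Passing a 24-byte (XNONCE_SIZE) nonce to init selects the XChaCha20 path, and seek repositions the keystream by block number when random access is needed.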

+ 146 - 0
core/crypto/chacha20poly1305/chacha20poly1305.odin

@@ -0,0 +1,146 @@
+package chacha20poly1305
+
+import "core:crypto"
+import "core:crypto/chacha20"
+import "core:crypto/poly1305"
+import "core:crypto/util"
+import "core:mem"
+
+KEY_SIZE :: chacha20.KEY_SIZE
+NONCE_SIZE :: chacha20.NONCE_SIZE
+TAG_SIZE :: poly1305.TAG_SIZE
+
+_P_MAX :: 64 * 0xffffffff // 64 * (2^32-1)
+
+_validate_common_slice_sizes :: proc (tag, key, nonce, aad, text: []byte) {
+	if len(tag) != TAG_SIZE {
+		panic("crypto/chacha20poly1305: invalid destination tag size")
+	}
+	if len(key) != KEY_SIZE {
+		panic("crypto/chacha20poly1305: invalid key size")
+	}
+	if len(nonce) != NONCE_SIZE {
+		panic("crypto/chacha20poly1305: invalid nonce size")
+	}
+
+	#assert(size_of(int) == 8 || size_of(int) <= 4)
+	when size_of(int) == 8 {
+		// A_MAX = 2^64 - 1 due to the length field limit.
+		// P_MAX = 64 * (2^32 - 1) due to the IETF ChaCha20 counter limit.
+		//
+		// A_MAX is limited by size_of(int), so there is no need to
+		// enforce it. P_MAX only needs to be checked on 64-bit targets,
+		// for reasons that should be obvious.
+		if text_len := len(text); text_len > _P_MAX {
+			panic("crypto/chacha20poly1305: oversized src data")
+		}
+	}
+}
+
+_PAD: [16]byte
+_update_mac_pad16 :: #force_inline proc (ctx: ^poly1305.Context, x_len: int) {
+	if pad_len := 16 - (x_len & (16-1)); pad_len != 16 {
+		poly1305.update(ctx, _PAD[:pad_len])
+	}
+}
+
+encrypt :: proc (ciphertext, tag, key, nonce, aad, plaintext: []byte) {
+	_validate_common_slice_sizes(tag, key, nonce, aad, plaintext)
+	if len(ciphertext) != len(plaintext) {
+		panic("crypto/chacha20poly1305: invalid destination ciphertext size")
+	}
+
+	stream_ctx: chacha20.Context = ---
+	chacha20.init(&stream_ctx, key, nonce)
+
+	// otk = poly1305_key_gen(key, nonce)
+	otk: [poly1305.KEY_SIZE]byte = ---
+	chacha20.keystream_bytes(&stream_ctx, otk[:])
+	mac_ctx: poly1305.Context = ---
+	poly1305.init(&mac_ctx, otk[:])
+	mem.zero_explicit(&otk, size_of(otk))
+
+	aad_len, ciphertext_len := len(aad), len(ciphertext)
+
+	// There is nothing preventing aad and ciphertext from overlapping
+	// so auth the AAD before encrypting (slightly different from the
+	// RFC, since the RFC encrypts into a new buffer).
+	//
+	// mac_data = aad | pad16(aad)
+	poly1305.update(&mac_ctx, aad)
+	_update_mac_pad16(&mac_ctx, aad_len)
+
+	// ciphertext = chacha20_encrypt(key, 1, nonce, plaintext)
+	chacha20.seek(&stream_ctx, 1)
+	chacha20.xor_bytes(&stream_ctx, ciphertext, plaintext)
+	chacha20.reset(&stream_ctx) // Don't need the stream context anymore.
+
+	// mac_data |= ciphertext | pad16(ciphertext)
+	poly1305.update(&mac_ctx, ciphertext)
+	_update_mac_pad16(&mac_ctx, ciphertext_len)
+
+	// mac_data |= num_to_8_le_bytes(aad.length)
+	// mac_data |= num_to_8_le_bytes(ciphertext.length)
+	l_buf := otk[0:16] // Reuse the scratch buffer.
+	util.PUT_U64_LE(l_buf[0:8], u64(aad_len))
+	util.PUT_U64_LE(l_buf[8:16], u64(ciphertext_len))
+	poly1305.update(&mac_ctx, l_buf)
+
+	// tag = poly1305_mac(mac_data, otk)
+	poly1305.final(&mac_ctx, tag) // Implicitly sanitizes context.
+}
+
+decrypt :: proc (plaintext, tag, key, nonce, aad, ciphertext: []byte) -> bool {
+	_validate_common_slice_sizes(tag, key, nonce, aad, ciphertext)
+	if len(ciphertext) != len(plaintext) {
+		panic("crypto/chacha20poly1305: invalid destination plaintext size")
+	}
+
+	// Note: Unlike encrypt, this can fail early, so use defer for
+	// sanitization rather than assuming control flow reaches certain
+	// points where needed.
+
+	stream_ctx: chacha20.Context = ---
+	chacha20.init(&stream_ctx, key, nonce)
+
+	// otk = poly1305_key_gen(key, nonce)
+	otk: [poly1305.KEY_SIZE]byte = ---
+	chacha20.keystream_bytes(&stream_ctx, otk[:])
+	defer chacha20.reset(&stream_ctx)
+
+	mac_ctx: poly1305.Context = ---
+	poly1305.init(&mac_ctx, otk[:])
+	defer mem.zero_explicit(&otk, size_of(otk))
+
+	aad_len, ciphertext_len := len(aad), len(ciphertext)
+
+	// mac_data = aad | pad16(aad)
+	// mac_data |= ciphertext | pad16(ciphertext)
+	// mac_data |= num_to_8_le_bytes(aad.length)
+	// mac_data |= num_to_8_le_bytes(ciphertext.length)
+	poly1305.update(&mac_ctx, aad)
+	_update_mac_pad16(&mac_ctx, aad_len)
+	poly1305.update(&mac_ctx, ciphertext)
+	_update_mac_pad16(&mac_ctx, ciphertext_len)
+	l_buf := otk[0:16] // Reuse the scratch buffer.
+	util.PUT_U64_LE(l_buf[0:8], u64(aad_len))
+	util.PUT_U64_LE(l_buf[8:16], u64(ciphertext_len))
+	poly1305.update(&mac_ctx, l_buf)
+
+	// tag = poly1305_mac(mac_data, otk)
+	derived_tag := otk[0:poly1305.TAG_SIZE] // Reuse the scratch buffer again.
+	poly1305.final(&mac_ctx, derived_tag) // Implicitly sanitizes context.
+
+	// Validate the tag in constant time.
+	if crypto.compare_constant_time(tag, derived_tag) != 1 {
+		// Zero out the plaintext, as a defense in depth measure.
+		mem.zero_explicit(raw_data(plaintext), ciphertext_len)
+		return false
+	}
+
+	// plaintext = chacha20_decrypt(key, 1, nonce, ciphertext)
+	chacha20.seek(&stream_ctx, 1)
+	chacha20.xor_bytes(&stream_ctx, plaintext, ciphertext)
+
+	return true
+}
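
A round-trip sketch for the AEAD (hypothetical example, assuming core:crypto/chacha20poly1305; the all-zero key and nonce are purely illustrative):

    package example

    import "core:crypto/chacha20poly1305"

    main :: proc() {
        key: [chacha20poly1305.KEY_SIZE]byte
        nonce: [chacha20poly1305.NONCE_SIZE]byte

        aad_str, msg_str := "header", "secret"
        aad := transmute([]byte)aad_str
        msg := transmute([]byte)msg_str

        ciphertext := make([]byte, len(msg))
        tag: [chacha20poly1305.TAG_SIZE]byte
        chacha20poly1305.encrypt(ciphertext, tag[:], key[:], nonce[:], aad, msg)

        plaintext := make([]byte, len(ciphertext))
        ok := chacha20poly1305.decrypt(plaintext, tag[:], key[:], nonce[:], aad, ciphertext)
        assert(ok) // on tag mismatch, decrypt zeroes plaintext and returns false
    }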

+ 52 - 0
core/crypto/crypto.odin

@@ -0,0 +1,52 @@
+package crypto
+
+import "core:mem"
+
+// compare_constant_time returns 1 iff a and b are equal, 0 otherwise.
+//
+// The execution time of this routine is constant regardless of the contents
+// of the slices being compared, as long as the length of the slices is equal.
+// If the length of the two slices is different, it will early-return 0.
+compare_constant_time :: proc "contextless" (a, b: []byte) -> int {
+	// If the length of the slices is different, early return.
+	//
+	// This leaks the fact that the slices have a different length,
+	// but the routine is primarily intended for comparing things
+	// like MACs and password digests.
+	n := len(a)
+	if n != len(b) {
+		return 0
+	}
+
+	return compare_byte_ptrs_constant_time(raw_data(a), raw_data(b), n)
+}
+
+// compare_byte_ptrs_constant_time returns 1 iff the bytes pointed to by
+// a and b are equal, 0 otherwise.
+//
+// The execution time of this routine is constant regardless of the
+// contents of the memory being compared.
+compare_byte_ptrs_constant_time :: proc "contextless" (a, b: ^byte, n: int) -> int {
+	x := mem.slice_ptr(a, n)
+	y := mem.slice_ptr(b, n)
+
+	v: byte
+	for i in 0..<n {
+		v |= x[i] ~ y[i]
+	}
+
+	// After the loop, v == 0 iff a == b.  The subtraction will underflow
+	// iff v == 0, setting the sign-bit, which gets returned.
+	return int((u32(v)-1) >> 31)
+}
+
+// rand_bytes fills the dst buffer with cryptographic entropy taken from
+// the system entropy source.  This routine will block if the system entropy
+// source is not ready yet.  All system entropy source failures are treated
+// as catastrophic, resulting in a panic.
+rand_bytes :: proc (dst: []byte) {
+	// zero-fill the buffer first
+	mem.zero_explicit(raw_data(dst), len(dst))
+
+	_rand_bytes(dst)
+}
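
For instance, verifying a MAC tag with the routine above (hypothetical values):

    package example

    import "core:crypto"

    main :: proc() {
        expected := []byte{0xde, 0xad, 0xbe, 0xef}
        received := []byte{0xde, 0xad, 0xbe, 0xef}
        // compare_constant_time returns 1 iff equal; its runtime depends
        // only on the length, not the contents.
        if crypto.compare_constant_time(expected, received) != 1 {
            panic("tag mismatch")
        }
    }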

+ 29 - 7
core/crypto/gost/gost.odin

@@ -18,16 +18,18 @@ import "core:io"
    High level API
*/

+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
    return hash_bytes(transmute([]byte)(data))
}

// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
    ctx: Gost_Context
    init(&ctx)
    update(&ctx, data)
@@ -35,10 +37,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
    return hash
}

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Gost_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
    ctx: Gost_Context
    init(&ctx)
    buf := make([]byte, 512)
@@ -56,7 +76,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {

// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
    if !load_at_once {
        return hash_stream(os.stream_from_handle(hd))
    } else {
@@ -64,7 +84,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
            return hash_bytes(buf[:]), ok
        }
    }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
}

hash :: proc {
@@ -72,6 +92,8 @@ hash :: proc {
    hash_file,
    hash_bytes,
    hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
}

/*

+ 117 - 28
core/crypto/groestl/groestl.odin

@@ -17,16 +17,21 @@ import "core:io"
    High level API
*/

+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
    return hash_bytes_224(transmute([]byte)(data))
}

// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
    ctx: Groestl_Context
    ctx.hashbitlen = 224
    init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
    return hash
}

+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 224
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
    ctx: Groestl_Context
    ctx.hashbitlen = 224
    init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {

// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
    if !load_at_once {
        return hash_stream_224(os.stream_from_handle(hd))
    } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
            return hash_bytes_224(buf[:]), ok
        }
    }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
}

hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
    hash_file_224,
    hash_bytes_224,
    hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
}

// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
    return hash_bytes_256(transmute([]byte)(data))
}

// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Groestl_Context
    ctx.hashbitlen = 256
    init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
    return hash
}

+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 256
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Groestl_Context
    ctx.hashbitlen = 256
    init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 384
     ctx.hashbitlen = 384
     init(&ctx)
     init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 384
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Groestl_Context
     ctx.hashbitlen = 384
     init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {

 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
         }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }

 hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_bytes_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }

 // hash_string_512 will hash the given input and return the
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
 }

 // hash_bytes_512 will hash the given input and return the
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Groestl_Context
     ctx.hashbitlen = 512
     init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
 }

+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 512
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Groestl_Context
     ctx.hashbitlen = 512
     init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {

 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }

 hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_bytes_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }

 /*
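
Usage note: the new *_to_buffer procs let the caller own the destination instead of receiving a fixed-size array. A minimal sketch, assuming the package is imported as core:crypto/groestl (the main package and import lines below are illustrative, not part of this diff):

package main

import "core:crypto/groestl"
import "core:fmt"

main :: proc() {
	// Array-returning form: the digest length is part of the return type.
	digest := groestl.hash_string_256("odin")

	// Buffer-filling form added in this diff: the destination must hold
	// at least DIGEST_SIZE_256 bytes, otherwise the assert fires.
	buf: [groestl.DIGEST_SIZE_256]byte
	groestl.hash_string_to_buffer_256("odin", buf[:])

	fmt.println(digest == buf) // both forms yield the same digest
}

The same pattern applies to the 224/384/512 variants and to the other hash packages changed below.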

Diff file is too large
+ 400 - 95
core/crypto/haval/haval.odin


+ 117 - 28
core/crypto/jh/jh.odin

@@ -17,16 +17,21 @@ import "core:io"
     High level API
 */

+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
 }

 // hash_bytes_224 will hash the given input and return the
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Jh_Context
     ctx.hashbitlen = 224
     init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
 }

+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 224
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Jh_Context
     ctx.hashbitlen = 224
     init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {

 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
         }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }

 hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_bytes_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }

 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }

 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Jh_Context
     ctx.hashbitlen = 256
     init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
 }

+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 256
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Jh_Context
     ctx.hashbitlen = 256
     init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {

 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }

 hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_bytes_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }

 // hash_string_384 will hash the given input and return the
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
 }

 // hash_bytes_384 will hash the given input and return the
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Jh_Context
     ctx.hashbitlen = 384
     init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
 }

+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 384
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Jh_Context
     ctx.hashbitlen = 384
     init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {

 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
         }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }

 hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_bytes_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }

 // hash_string_512 will hash the given input and return the
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
 }

 // hash_bytes_512 will hash the given input and return the
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Jh_Context
     ctx.hashbitlen = 512
     init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
 }

+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 512
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Jh_Context
     ctx.hashbitlen = 512
     init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {

 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }

 hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_bytes_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }

 /*

+ 130 - 37
core/crypto/keccak/keccak.odin

@@ -21,18 +21,23 @@ import "../_sha3"
     High level API
 */

+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
 }

 // hash_bytes_224 will hash the given input and return the
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
@@ -40,12 +45,32 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
 }

+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_224
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     ctx.is_keccak = true
     _sha3.init(&ctx)
     buf := make([]byte, 512)
@@ -63,7 +88,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {

 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
@@ -71,7 +96,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
         }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }

 hash_224 :: proc {
@@ -79,20 +104,22 @@ hash_224 :: proc {
     hash_file_224,
     hash_bytes_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }

 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }

 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
@@ -100,12 +127,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
 }

+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     ctx.is_keccak = true
     _sha3.init(&ctx)
     buf := make([]byte, 512)
@@ -123,7 +170,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {

 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
@@ -131,7 +178,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }

 hash_256 :: proc {
@@ -139,20 +186,22 @@ hash_256 :: proc {
     hash_file_256,
     hash_bytes_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }

 // hash_string_384 will hash the given input and return the
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
 }

 // hash_bytes_384 will hash the given input and return the
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
@@ -160,12 +209,32 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
 }

+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_384
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     ctx.is_keccak = true
     _sha3.init(&ctx)
     buf := make([]byte, 512)
@@ -183,7 +252,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {

 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
@@ -191,7 +260,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
         }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }

 hash_384 :: proc {
@@ -199,20 +268,22 @@ hash_384 :: proc {
     hash_file_384,
     hash_bytes_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }

 // hash_string_512 will hash the given input and return the
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
 }

 // hash_bytes_512 will hash the given input and return the
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
@@ -220,12 +291,32 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
 }

+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_512
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     ctx.is_keccak = true
     _sha3.init(&ctx)
     buf := make([]byte, 512)
@@ -243,7 +334,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {

 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
@@ -251,7 +342,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }

 hash_512 :: proc {
@@ -259,13 +350,15 @@ hash_512 :: proc {
     hash_file_512,
     hash_bytes_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }

 /*
     Low level API
 */

-Sha3_Context :: _sha3.Sha3_Context
+Keccak_Context :: _sha3.Sha3_Context

 init :: proc(ctx: ^_sha3.Sha3_Context) {
     ctx.is_keccak = true
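
The alias rename only affects the exported name; the low-level procs still operate on the shared _sha3 context. A streaming sketch under that assumption (update and final are the package's low-level wrappers, which sit outside this hunk; the main package below is illustrative):

package main

import "core:crypto/keccak"

main :: proc() {
	ctx: keccak.Keccak_Context // now the exported alias for _sha3.Sha3_Context
	ctx.mdlen = keccak.DIGEST_SIZE_256

	keccak.init(&ctx) // marks the context as Keccak before the shared SHA-3 setup

	msg := "streamed input"
	keccak.update(&ctx, transmute([]byte)(msg))

	digest: [keccak.DIGEST_SIZE_256]byte
	keccak.final(&ctx, digest[:])
}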

+ 43 - 21
core/crypto/md2/md2.odin

@@ -17,16 +17,18 @@ import "core:io"
     High level API
 */

+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-	hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+	hash: [DIGEST_SIZE]byte
 	ctx: Md2_Context
     // init(&ctx) No-op
     update(&ctx, data)
@@ -34,10 +36,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+	hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+	assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md2_Context
+    // init(&ctx) No-op
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-	hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+	hash: [DIGEST_SIZE]byte
 	ctx: Md2_Context
 	// init(&ctx) No-op
 	buf := make([]byte, 512)
@@ -55,7 +75,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {

 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
 	if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -63,7 +83,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -71,6 +91,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*
@@ -86,7 +108,7 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
 	for i := 0; i < len(data); i += 1 {
 		ctx.data[ctx.datalen] = data[i]
 		ctx.datalen += 1
-		if (ctx.datalen == 16) {
+		if (ctx.datalen == DIGEST_SIZE) {
 			transform(ctx, ctx.data[:])
 			ctx.datalen = 0
 		}
@@ -94,14 +116,14 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
 }

 final :: proc(ctx: ^Md2_Context, hash: []byte) {
-	to_pad := byte(16 - ctx.datalen)
-    for ctx.datalen < 16 {
+	to_pad := byte(DIGEST_SIZE - ctx.datalen)
+    for ctx.datalen < DIGEST_SIZE {
        ctx.data[ctx.datalen] = to_pad
 		ctx.datalen += 1
    }
 	transform(ctx, ctx.data[:])
 	transform(ctx, ctx.checksum[:])
-    for i := 0; i < 16; i += 1 {
+    for i := 0; i < DIGEST_SIZE; i += 1 {
        hash[i] = ctx.state[i]
    }
 }
@@ -111,9 +133,9 @@ final :: proc(ctx: ^Md2_Context, hash: []byte) {
 */

 Md2_Context :: struct {
-    data:     [16]byte,
-    state:    [16 * 3]byte,
-    checksum: [16]byte,
+    data:     [DIGEST_SIZE]byte,
+    state:    [DIGEST_SIZE * 3]byte,
+    checksum: [DIGEST_SIZE]byte,
     datalen:  int,
 }

@@ -140,20 +162,20 @@ PI_TABLE := [?]byte {

 transform :: proc(ctx: ^Md2_Context, data: []byte) {
     j,k,t: byte
-	for j = 0; j < 16; j += 1 {
-		ctx.state[j + 16] = data[j]
-		ctx.state[j + 16 * 2] = (ctx.state[j + 16] ~ ctx.state[j])
+	for j = 0; j < DIGEST_SIZE; j += 1 {
+		ctx.state[j + DIGEST_SIZE] = data[j]
+		ctx.state[j + DIGEST_SIZE * 2] = (ctx.state[j + DIGEST_SIZE] ~ ctx.state[j])
 	}
 	t = 0
-	for j = 0; j < 16 + 2; j += 1 {
-		for k = 0; k < 16 * 3; k += 1 {
+	for j = 0; j < DIGEST_SIZE + 2; j += 1 {
+		for k = 0; k < DIGEST_SIZE * 3; k += 1 {
 			ctx.state[k] ~= PI_TABLE[t]
 			t = ctx.state[k]
 		}
 		t = (t + j) & 0xff
 	}
-	t = ctx.checksum[16 - 1]
-	for j = 0; j < 16; j += 1 {
+	t = ctx.checksum[DIGEST_SIZE - 1]
+	for j = 0; j < DIGEST_SIZE; j += 1 {
 		ctx.checksum[j] ~= PI_TABLE[data[j] ~ t]
 		t = ctx.checksum[j]
 	}
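
For reference, hash :: proc { ... } is an explicit Odin procedure group, so one call site per input kind dispatches by argument types, and the new buffer variants slot in without renaming anything. A small sketch (the main package and import path are illustrative):

package main

import "core:crypto/md2"
import "core:fmt"

main :: proc() {
	digest := md2.hash("abc") // dispatches to hash_string

	// The new buffer variants join the same group:
	out: [md2.DIGEST_SIZE]byte
	md2.hash("abc", out[:])   // dispatches to hash_string_to_buffer

	fmt.println(digest == out)
}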

+ 31 - 9
core/crypto/md4/md4.odin

@@ -21,16 +21,18 @@ import "../util"
     High level API
 */

+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Md4_Context
     init(&ctx)
     update(&ctx, data)
@@ -38,10 +40,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md4_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Md4_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -59,7 +79,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {

 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -67,7 +87,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -75,6 +95,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*
@@ -171,9 +193,9 @@ HH :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {

 transform :: proc(ctx: ^Md4_Context, data: []byte) {
     a, b, c, d, i, j: u32
-    m: [16]u32
+    m: [DIGEST_SIZE]u32

-    for i, j = 0, 0; i < 16; i += 1 {
+    for i, j = 0, 0; i < DIGEST_SIZE; i += 1 {
         m[i] = u32(data[j]) | (u32(data[j + 1]) << 8) | (u32(data[j + 2]) << 16) | (u32(data[j + 3]) << 24)
         j += 4
     }

+ 31 - 9
core/crypto/md5/md5.odin

@@ -20,16 +20,18 @@ import "../util"
     High level API
 */

+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Md5_Context
     init(&ctx)
     update(&ctx, data)
@@ -37,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md5_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Md5_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -58,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {

 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -66,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -74,6 +94,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*
@@ -176,9 +198,9 @@ II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u

 transform :: proc(ctx: ^Md5_Context, data: []byte) {
     i, j: u32
-    m: [16]u32
+    m: [DIGEST_SIZE]u32

-    for i, j = 0, 0; i < 16; i+=1 {
+    for i, j = 0, 0; i < DIGEST_SIZE; i+=1 {
         m[i] = u32(data[j]) + u32(data[j + 1]) << 8 + u32(data[j + 2]) << 16 + u32(data[j + 3]) << 24
         j += 4
     }

+ 163 - 0
core/crypto/poly1305/poly1305.odin

@@ -0,0 +1,163 @@
+package poly1305
+
+import "core:crypto"
+import "core:crypto/util"
+import field "core:crypto/_fiat/field_poly1305"
+import "core:mem"
+
+KEY_SIZE :: 32
+TAG_SIZE :: 16
+
+_BLOCK_SIZE :: 16
+
+sum :: proc (dst, msg, key: []byte) {
+	ctx: Context = ---
+
+	init(&ctx, key)
+	update(&ctx, msg)
+	final(&ctx, dst)
+}
+
+verify :: proc (tag, msg, key: []byte) -> bool {
+	ctx: Context = ---
+	derived_tag: [16]byte = ---
+
+	if len(tag) != TAG_SIZE {
+		panic("crypto/poly1305: invalid tag size")
+	}
+
+	init(&ctx, key)
+	update(&ctx, msg)
+	final(&ctx, derived_tag[:])
+
+	return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+Context :: struct {
+	_r: field.Tight_Field_Element,
+	_a: field.Tight_Field_Element,
+	_s: field.Tight_Field_Element,
+
+	_buffer: [_BLOCK_SIZE]byte,
+	_leftover: int,
+
+	_is_initialized: bool,
+}
+
+init :: proc (ctx: ^Context, key: []byte) {
+	if len(key) != KEY_SIZE {
+		panic("crypto/poly1305: invalid key size")
+	}
+
+	// r = le_bytes_to_num(key[0..15])
+	// r = clamp(r) (r &= 0xffffffc0ffffffc0ffffffc0fffffff)
+	tmp_lo := util.U64_LE(key[0:8]) & 0x0ffffffc0fffffff
+	tmp_hi := util.U64_LE(key[8:16]) & 0xffffffc0ffffffc
+	field.fe_from_u64s(&ctx._r, tmp_lo, tmp_hi)
+
+	// s = le_bytes_to_num(key[16..31])
+	field.fe_from_bytes(&ctx._s, key[16:32], 0)
+
+	// a = 0
+	field.fe_zero(&ctx._a)
+
+	// No leftover in buffer
+	ctx._leftover = 0
+
+	ctx._is_initialized = true
+}
+
+update :: proc (ctx: ^Context, data: []byte) {
+	assert(ctx._is_initialized)
+
+	msg := data
+	msg_len := len(data)
+
+	// Handle leftover
+	if ctx._leftover > 0 {
+		want := min(_BLOCK_SIZE - ctx._leftover, msg_len)
+		copy_slice(ctx._buffer[ctx._leftover:], msg[:want])
+		msg_len = msg_len - want
+		msg = msg[want:]
+		ctx._leftover = ctx._leftover + want
+		if ctx._leftover < _BLOCK_SIZE {
+			return
+		}
+		_blocks(ctx, ctx._buffer[:])
+		ctx._leftover = 0
+	}
+
+	// Process full blocks
+	if msg_len >= _BLOCK_SIZE {
+		want := msg_len & (~int(_BLOCK_SIZE - 1))
+		_blocks(ctx, msg[:want])
+		msg = msg[want:]
+		msg_len = msg_len - want
+	}
+
+	// Store leftover
+	if msg_len > 0 {
+		// TODO: While -donna does it this way, I'm fairly sure that
+		// `ctx._leftover == 0` is an invariant at this point.
+		copy(ctx._buffer[ctx._leftover:], msg)
+		ctx._leftover = ctx._leftover + msg_len
+	}
+}
+
+final :: proc (ctx: ^Context, dst: []byte) {
+	assert(ctx._is_initialized)
+
+	if len(dst) != TAG_SIZE {
+		panic("poly1305: invalid destination tag size")
+	}
+
+	// Process remaining block
+	if ctx._leftover > 0 {
+		ctx._buffer[ctx._leftover] = 1
+		for i := ctx._leftover + 1; i < _BLOCK_SIZE; i = i + 1 {
+			ctx._buffer[i] = 0
+		}
+		_blocks(ctx, ctx._buffer[:], true)
+	}
+
+	// a += s
+	field.fe_add(field.fe_relax_cast(&ctx._a), &ctx._a, &ctx._s) // _a unreduced
+	field.fe_carry(&ctx._a, field.fe_relax_cast(&ctx._a)) // _a reduced
+
+	// return num_to_16_le_bytes(a)
+	tmp: [32]byte = ---
+	field.fe_to_bytes(&tmp, &ctx._a)
+	copy_slice(dst, tmp[0:16])
+
+	reset(ctx)
+}
+
+reset :: proc (ctx: ^Context) {
+	mem.zero_explicit(&ctx._r, size_of(ctx._r))
+	mem.zero_explicit(&ctx._a, size_of(ctx._a))
+	mem.zero_explicit(&ctx._s, size_of(ctx._s))
+	mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
+
+	ctx._is_initialized = false
+}
+
+_blocks :: proc (ctx: ^Context, msg: []byte, final := false) {
+	n: field.Tight_Field_Element = ---
+	final_byte := byte(!final)
+
+	data := msg
+	data_len := len(data)
+	for data_len >= _BLOCK_SIZE {
+		// n = le_bytes_to_num(msg[((i-1)*16)..*i*16] | [0x01])
+		field.fe_from_bytes(&n, data[:_BLOCK_SIZE], final_byte, false)
+
+		// a += n
+		field.fe_add(field.fe_relax_cast(&ctx._a), &ctx._a, &n) // _a unreduced
+
+		// a = (r * a) % p
+		field.fe_carry_mul(&ctx._a, field.fe_relax_cast(&ctx._a), field.fe_relax_cast(&ctx._r)) // _a reduced
+
+		data = data[_BLOCK_SIZE:]
+		data_len = data_len - _BLOCK_SIZE
+	}
+}
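
A one-shot usage sketch for the API above; the zeroed key is purely illustrative, a real caller must supply a fresh, unpredictable 32-byte one-time key:

package main

import "core:crypto/poly1305"
import "core:fmt"

main :: proc() {
	key: [poly1305.KEY_SIZE]byte // one-time key; never reuse across messages

	msg_str := "a message to authenticate"
	msg := transmute([]byte)(msg_str)

	tag: [poly1305.TAG_SIZE]byte
	poly1305.sum(tag[:], msg, key[:])

	// verify recomputes the tag and compares in constant time.
	fmt.println(poly1305.verify(tag[:], msg, key[:])) // true
}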

+ 7 - 0
core/crypto/rand_generic.odin

@@ -0,0 +1,7 @@
+package crypto
+
+when ODIN_OS != .Linux && ODIN_OS != .OpenBSD && ODIN_OS != .Windows {
+	_rand_bytes :: proc (dst: []byte) {
+		unimplemented("crypto: rand_bytes not supported on this OS")
+	}
+}

+ 37 - 0
core/crypto/rand_linux.odin

@@ -0,0 +1,37 @@
+package crypto
+
+import "core:fmt"
+import "core:os"
+import "core:sys/unix"
+
+_MAX_PER_CALL_BYTES :: 33554431 // 2^25 - 1
+
+_rand_bytes :: proc (dst: []byte) {
+	dst := dst
+	l := len(dst)
+
+	for l > 0 {
+		to_read := min(l, _MAX_PER_CALL_BYTES)
+		ret := unix.sys_getrandom(raw_data(dst), to_read, 0)
+		if ret < 0 {
+			switch os.Errno(-ret) {
+			case os.EINTR:
+				// Call interrupted by a signal handler, just retry the
+				// request.
+				continue
+			case os.ENOSYS:
+				// The kernel is apparently prehistoric (< 3.17 circa 2014)
+				// and does not support getrandom.
+				panic("crypto: getrandom not available in kernel")
+			case:
+				// unless the kernel interface changes (i.e. the Linux
+				// unless the kernel interface changes (ie: the Linux
+				// developers break userland).
+				panic(fmt.tprintf("crypto: getrandom failed: %d", ret))
+			}
+		}
+
+		l -= ret
+		dst = dst[ret:]
+	}
+}

+ 12 - 0
core/crypto/rand_openbsd.odin

@@ -0,0 +1,12 @@
+package crypto
+
+import "core:c"
+
+foreign import libc "system:c"
+foreign libc {
+	arc4random_buf :: proc "c" (buf: rawptr, nbytes: c.size_t) ---
+}
+
+_rand_bytes :: proc (dst: []byte) {
+	arc4random_buf(raw_data(dst), c.size_t(len(dst)))
+}

+ 23 - 0
core/crypto/rand_windows.odin

@@ -0,0 +1,23 @@
+package crypto
+
+import win32 "core:sys/windows"
+import "core:os"
+import "core:fmt"
+
+_rand_bytes :: proc(dst: []byte) {
+	ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
+	if ret != os.ERROR_NONE {
+		switch ret {
+			case os.ERROR_INVALID_HANDLE:
+				// The handle in the first parameter is invalid.
+				// This should not happen here, since we explicitly pass nil for it.
+				panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
+			case os.ERROR_INVALID_PARAMETER:
+				// One of the parameters was invalid
+				panic("crypto: BCryptGenRandom Invalid parameter")
+			case:
+				// Unknown error
+				panic(fmt.tprintf("crypto: BCryptGenRandom failed: %d\n", ret))
+		}
+	}
+}

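Taken together, these files give each supported OS a `_rand_bytes` backend (getrandom(2) on Linux, arc4random_buf(3) on OpenBSD, BCryptGenRandom on Windows), with the generic stub panicking elsewhere. A sketch of the caller's side, assuming the public wrapper in core/crypto/crypto.odin (added in this commit) simply forwards to `_rand_bytes`:

    import "core:crypto"

    example_random_seed :: proc () {
        seed: [32]byte
        crypto.rand_bytes(seed[:])  // assumed public wrapper over the per-OS _rand_bytes
        // use seed ...
    }
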
+ 113 - 28
core/crypto/ripemd/ripemd.odin

@@ -19,16 +19,21 @@ import "../util"
     High level API
 */
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_320 :: 40
+
 // hash_string_128 will hash the given input and return the
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
 }
 
 // hash_bytes_128 will hash the given input and return the
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: Ripemd128_Context
     init(&ctx)
     update(&ctx, data)
@@ -36,10 +41,28 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
 }
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd128_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: Ripemd128_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -57,7 +80,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
@@ -65,7 +88,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 
 hash_128 :: proc {
@@ -73,18 +96,20 @@ hash_128 :: proc {
     hash_file_128,
     hash_bytes_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 
 // hash_string_160 will hash the given input and return the
 // computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
     return hash_bytes_160(transmute([]byte)(data))
 }
 
 // hash_bytes_160 will hash the given input and return the
 // computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
     ctx: Ripemd160_Context
     init(&ctx)
     update(&ctx, data)
@@ -92,10 +117,28 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
     return hash
 }
 
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd160_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_160 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
     ctx: Ripemd160_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -113,7 +156,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
 
 // hash_file_160 will read the file provided by the given handle
 // and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
     if !load_at_once {
         return hash_stream_160(os.stream_from_handle(hd))
     } else {
@@ -121,7 +164,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
             return hash_bytes_160(buf[:]), ok
         }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
 }
 
 hash_160 :: proc {
@@ -129,18 +172,20 @@ hash_160 :: proc {
     hash_file_160,
     hash_bytes_160,
     hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
 }
 
 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }
 
 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Ripemd256_Context
     init(&ctx)
     update(&ctx, data)
@@ -148,10 +193,28 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
 }
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd256_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Ripemd256_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -169,7 +232,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
@@ -177,7 +240,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 
 hash_256 :: proc {
@@ -185,18 +248,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_bytes_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 
 // hash_string_320 will hash the given input and return the
 // computed hash
-hash_string_320 :: proc(data: string) -> [40]byte {
+hash_string_320 :: proc(data: string) -> [DIGEST_SIZE_320]byte {
     return hash_bytes_320(transmute([]byte)(data))
 }
 
 // hash_bytes_320 will hash the given input and return the
 // computed hash
-hash_bytes_320 :: proc(data: []byte) -> [40]byte {
-    hash: [40]byte
+hash_bytes_320 :: proc(data: []byte) -> [DIGEST_SIZE_320]byte {
+    hash: [DIGEST_SIZE_320]byte
     ctx: Ripemd320_Context
     init(&ctx)
     update(&ctx, data)
@@ -204,10 +269,28 @@ hash_bytes_320 :: proc(data: []byte) -> [40]byte {
     return hash
 }
 
+// hash_string_to_buffer_320 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_320 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_320(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_320 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_320 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_320, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd320_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_320 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
-    hash: [40]byte
+hash_stream_320 :: proc(s: io.Stream) -> ([DIGEST_SIZE_320]byte, bool) {
+    hash: [DIGEST_SIZE_320]byte
     ctx: Ripemd320_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -225,7 +308,7 @@ hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
 
 // hash_file_320 will read the file provided by the given handle
 // and compute a hash
-hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool) {
+hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_320]byte, bool) {
     if !load_at_once {
        return hash_stream_320(os.stream_from_handle(hd))
     } else {
@@ -233,7 +316,7 @@ hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool)
             return hash_bytes_320(buf[:]), ok
         }
     }
-    return [40]byte{}, false
+    return [DIGEST_SIZE_320]byte{}, false
 }
 
 hash_320 :: proc {
@@ -241,6 +324,8 @@ hash_320 :: proc {
     hash_file_320,
     hash_bytes_320,
     hash_string_320,
+    hash_bytes_to_buffer_320,
+    hash_string_to_buffer_320,
 }
 
 /*

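The same `hash_*_to_buffer` pattern recurs across every hash package touched by this commit. A small sketch of the caller's side for RIPEMD-160, using the constants introduced above (procedure name here is illustrative):

    import "core:crypto/ripemd"

    example_ripemd :: proc (data: []byte) {
        // Allocation-free variant: the destination must hold at least
        // DIGEST_SIZE_160 (20) bytes, or the assert fires.
        digest: [ripemd.DIGEST_SIZE_160]byte
        ripemd.hash_bytes_to_buffer_160(data, digest[:])

        // One-shot variant returning the array by value.
        digest2 := ripemd.hash_bytes_160(data)
        _ = digest2
    }
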
+ 30 - 7
core/crypto/sha1/sha1.odin

@@ -19,16 +19,19 @@ import "../util"
 /*
     High level API
 */
+
+DIGEST_SIZE :: 20
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [20]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }
 
 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Sha1_Context
     init(&ctx)
     update(&ctx, data)
@@ -36,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [20]byte {
     return hash
 }
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha1_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Sha1_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -57,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -65,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 
 hash :: proc {
@@ -73,6 +94,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 
 /*

+ 121 - 30
core/crypto/sha2/sha2.odin

@@ -21,16 +21,21 @@ import "../util"
     High level API
 */
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
 }
 
 // hash_bytes_224 will hash the given input and return the
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Sha256_Context
     ctx.is224 = true
     init(&ctx)
@@ -39,10 +44,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
 }
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha256_Context
+    ctx.is224 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Sha512_Context
     ctx.is384 = false
     init(&ctx)
@@ -61,7 +85,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
@@ -69,7 +93,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
         }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 
 hash_224 :: proc {
@@ -77,18 +101,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_bytes_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 
 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }
 
 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Sha256_Context
     ctx.is224 = false
     init(&ctx)
@@ -97,10 +123,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
 }
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha256_Context
+    ctx.is224 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Sha512_Context
     ctx.is384 = false
     init(&ctx)
@@ -119,7 +164,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
@@ -127,7 +172,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 
 hash_256 :: proc {
@@ -135,18 +180,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_bytes_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 
 // hash_string_384 will hash the given input and return the
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
 }
 
 // hash_bytes_384 will hash the given input and return the
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Sha512_Context
     ctx.is384 = true
     init(&ctx)
@@ -155,10 +202,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
 }
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha512_Context
+    ctx.is384 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Sha512_Context
     ctx.is384 = true
     init(&ctx)
@@ -177,7 +243,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
@@ -185,7 +251,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
         }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 
 hash_384 :: proc {
@@ -193,18 +259,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_bytes_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 
 // hash_string_512 will hash the given input and return the
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
 }
 
 // hash_bytes_512 will hash the given input and return the
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Sha512_Context
     ctx.is384 = false
     init(&ctx)
@@ -213,10 +281,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
 }
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha512_Context
+    ctx.is384 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Sha512_Context
     ctx.is384 = false
     init(&ctx)
@@ -235,7 +322,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
@@ -243,7 +330,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 
 hash_512 :: proc {
@@ -251,6 +338,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_bytes_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 
 /*
@@ -330,8 +419,10 @@ update :: proc(ctx: ^$T, data: []byte) {
     sha2_transf(ctx, shifted_message, block_nb)
 
     rem_len = new_len % CURR_BLOCK_SIZE
-    when T == Sha256_Context      {copy(ctx.block[:], shifted_message[block_nb << 6:rem_len])}
-    else when T == Sha512_Context {copy(ctx.block[:], shifted_message[block_nb << 7:rem_len])}
+    if rem_len > 0 {
+        when T == Sha256_Context      {copy(ctx.block[:], shifted_message[block_nb << 6:rem_len])}
+        else when T == Sha512_Context {copy(ctx.block[:], shifted_message[block_nb << 7:rem_len])}
+    }
 
     ctx.length = rem_len
     when T == Sha256_Context      {ctx.tot_len += (block_nb + 1) << 6}

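The `if rem_len > 0` guard added to update matters: when the accumulated input length is an exact multiple of the block size, `rem_len` is 0 and the old unconditional copy sliced `shifted_message[block_nb << 6:0]`, an invalid range once `block_nb > 0`. A sketch of an input that exercises the fixed path (the SHA-256 block size is 64 bytes):

    import "core:crypto/sha2"

    example_exact_block :: proc () {
        block: [64]byte                           // exactly one SHA-256 block
        digest := sha2.hash_bytes_256(block[:])   // hits the rem_len == 0 case in update
        _ = digest
    }

Separately, hash_stream_224 and hash_stream_256 in this file still declare `ctx: Sha512_Context` while the one-shot variants use Sha256_Context; that looks like an oversight carried over from before this change rather than something this commit introduces.
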
+ 125 - 36
core/crypto/sha3/sha3.odin

@@ -20,30 +20,54 @@ import "../_sha3"
     High level API
 */
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
 }
 
 // hash_bytes_224 will hash the given input and return the
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     return hash
 }
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_224
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     defer delete(buf)
@@ -60,7 +84,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
@@ -68,7 +92,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
         }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 
 hash_224 :: proc {
@@ -76,32 +100,53 @@ hash_224 :: proc {
     hash_file_224,
     hash_bytes_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 
 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }
 
 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     return hash
 }
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     defer delete(buf)
@@ -118,7 +163,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
@@ -126,7 +171,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 
 hash_256 :: proc {
@@ -134,32 +179,53 @@ hash_256 :: proc {
     hash_file_256,
     hash_bytes_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 
 // hash_string_384 will hash the given input and return the
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
 }
 
 // hash_bytes_384 will hash the given input and return the
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     return hash
 }
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_384
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     defer delete(buf)
@@ -176,7 +242,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
@@ -184,7 +250,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
         }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 
 hash_384 :: proc {
@@ -192,32 +258,53 @@ hash_384 :: proc {
     hash_file_384,
     hash_bytes_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 
 // hash_string_512 will hash the given input and return the
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
 }
 
 // hash_bytes_512 will hash the given input and return the
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     return hash
 }
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_512
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     defer delete(buf)
@@ -234,7 +321,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
@@ -242,7 +329,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 
 hash_512 :: proc {
@@ -250,6 +337,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_bytes_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 
 /*

+ 66 - 19
core/crypto/shake/shake.odin

@@ -20,18 +20,21 @@ import "../_sha3"
     High level API
 */
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_256 :: 32
+
 // hash_string_128 will hash the given input and return the
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
 }
 
 // hash_bytes_128 will hash the given input and return the
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 16
+    ctx.mdlen = DIGEST_SIZE_128
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.shake_xof(&ctx)
@@ -39,12 +42,32 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
 }
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_128
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.shake_xof(&ctx)
+    _sha3.shake_out(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 16
+    ctx.mdlen = DIGEST_SIZE_128
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     defer delete(buf)
@@ -62,7 +85,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
@@ -70,7 +93,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 
 hash_128 :: proc {
@@ -78,20 +101,22 @@ hash_128 :: proc {
     hash_file_128,
     hash_bytes_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 
 // hash_string_256 will hash the given input and return the
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
 }
 
 // hash_bytes_256 will hash the given input and return the
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.shake_xof(&ctx)
@@ -99,12 +124,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.shake_xof(&ctx)
+    _sha3.shake_out(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -122,7 +167,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -130,7 +175,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -138,13 +183,15 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 /*
 /*
     Low level API
     Low level API
 */
 */
 
 
-Sha3_Context :: _sha3.Sha3_Context
+Shake_Context :: _sha3.Sha3_Context
 
 
 init :: proc(ctx: ^_sha3.Sha3_Context) {
 init :: proc(ctx: ^_sha3.Sha3_Context) {
     _sha3.init(ctx)
     _sha3.init(ctx)

+ 335 - 0
core/crypto/siphash/siphash.odin

@@ -0,0 +1,335 @@
+package siphash
+
+/*
+    Copyright 2022 zhibog
+    Made available under the BSD-3 license.
+
+    List of contributors:
+        zhibog:  Initial implementation.
+
+    Implementation of the SipHash hashing algorithm, as defined at <https://github.com/veorq/SipHash> and <https://www.aumasson.jp/siphash/siphash.pdf>
+
+    Use the specific procedures for a certain setup. The generic procedures will default to SipHash-2-4.
+*/
+
+import "core:crypto"
+import "core:crypto/util"
+
+/*
+    High level API
+*/
+
+KEY_SIZE    :: 16
+DIGEST_SIZE :: 8
+
+// sum_string_1_3 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_1_3 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_1_3(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_1_3 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_1_3 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 1, 3)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_1_3 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_1_3 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_1_3(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_1_3 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_1_3 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_1_3(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_1_3 :: proc {
+    sum_string_1_3,
+    sum_bytes_1_3,
+    sum_string_to_buffer_1_3,
+    sum_bytes_to_buffer_1_3,
+}
+
+// verify_u64_1_3 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_1_3 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_1_3(msg, key) == tag
+}
+
+// verify_bytes_1_3 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_1_3 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_1_3(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_1_3 :: proc {
+    verify_bytes_1_3,
+    verify_u64_1_3,
+}
+
+// sum_string_2_4 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_2_4 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_2_4(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_2_4 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_2_4 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 2, 4)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_2_4 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_2_4 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_2_4(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_2_4 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_2_4 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_2_4(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_2_4 :: proc {
+    sum_string_2_4,
+    sum_bytes_2_4,
+    sum_string_to_buffer_2_4,
+    sum_bytes_to_buffer_2_4,
+}
+
+sum_string           :: sum_string_2_4
+sum_bytes            :: sum_bytes_2_4
+sum_string_to_buffer :: sum_string_to_buffer_2_4
+sum_bytes_to_buffer  :: sum_bytes_to_buffer_2_4
+sum :: proc {
+    sum_string,
+    sum_bytes,
+    sum_string_to_buffer,
+    sum_bytes_to_buffer,
+}
+
+// verify_u64_2_4 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_2_4 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_2_4(msg, key) == tag
+}
+
+// verify_bytes_2_4 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_2_4 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_2_4(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_2_4 :: proc {
+    verify_bytes_2_4,
+    verify_u64_2_4,
+}
+
+verify_bytes :: verify_bytes_2_4
+verify_u64   :: verify_u64_2_4
+verify :: proc {
+    verify_bytes,
+    verify_u64,
+}
+
+// sum_string_4_8 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_4_8 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_4_8(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_4_8 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_4_8 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 4, 8)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_4_8 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_4_8 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_4_8(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_4_8 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_4_8 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_4_8(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_4_8 :: proc {
+    sum_string_4_8,
+    sum_bytes_4_8,
+    sum_string_to_buffer_4_8,
+    sum_bytes_to_buffer_4_8,
+}
+
+// verify_u64_4_8 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_4_8 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_4_8(msg, key) == tag
+}
+
+// verify_bytes_4_8 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_4_8 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_4_8(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_4_8 :: proc {
+    verify_bytes_4_8,
+    verify_u64_4_8,
+}
+
+/*
+    Low level API
+*/
+
+init :: proc(ctx: ^Context, key: []byte, c_rounds, d_rounds: int) {
+    assert(len(key) == KEY_SIZE, "crypto/siphash: Invalid key size, want 16")
+    ctx.c_rounds = c_rounds
+    ctx.d_rounds = d_rounds
+    is_valid_setting := (ctx.c_rounds == 1 && ctx.d_rounds == 3) ||
+                        (ctx.c_rounds == 2 && ctx.d_rounds == 4) ||
+                        (ctx.c_rounds == 4 && ctx.d_rounds == 8)
+    assert(is_valid_setting, "crypto/siphash: Incorrect rounds set up. Valid pairs are (1,3), (2,4) and (4,8)")
+    ctx.k0 = util.U64_LE(key[:8])
+    ctx.k1 = util.U64_LE(key[8:])
+    ctx.v0 = 0x736f6d6570736575 ~ ctx.k0
+    ctx.v1 = 0x646f72616e646f6d ~ ctx.k1
+    ctx.v2 = 0x6c7967656e657261 ~ ctx.k0
+    ctx.v3 = 0x7465646279746573 ~ ctx.k1
+    ctx.is_initialized = true
+}
+
+update :: proc(ctx: ^Context, data: []byte) {
+    assert(ctx.is_initialized, "crypto/siphash: Context is not initialized")
+    ctx.last_block = len(data) / 8 * 8
+    ctx.buf = data
+    i := 0
+    m: u64
+    for i < ctx.last_block {
+        m = u64(ctx.buf[i] & 0xff)
+        i += 1
+
+        for r in u64(1)..<8 {
+            m |= u64(ctx.buf[i] & 0xff) << (r * 8)
+            i += 1
+        }
+
+        ctx.v3 ~= m
+        for _ in 0..<ctx.c_rounds {
+            _compress(ctx)
+        }
+
+        ctx.v0 ~= m
+    }
+}
+
+final :: proc(ctx: ^Context, dst: ^u64) {
+    m: u64
+    for i := len(ctx.buf) - 1; i >= ctx.last_block; i -= 1 {
+        m <<= 8
+        m |= u64(ctx.buf[i] & 0xff)
+    }
+    m |= u64(len(ctx.buf)) << 56 // convert before shifting so the length byte survives on 32-bit targets
+
+    ctx.v3 ~= m
+
+    for _ in 0..<ctx.c_rounds {
+        _compress(ctx)
+    }
+
+    ctx.v0 ~= m
+    ctx.v2 ~= 0xff
+
+    for _ in 0..<ctx.d_rounds {
+        _compress(ctx)
+    }
+
+    dst^ = ctx.v0 ~ ctx.v1 ~ ctx.v2 ~ ctx.v3
+
+    reset(ctx)
+}
+
+reset :: proc(ctx: ^Context) {
+    ctx.k0, ctx.k1 = 0, 0
+    ctx.v0, ctx.v1 = 0, 0
+    ctx.v2, ctx.v3 = 0, 0
+    ctx.last_block = 0
+    ctx.c_rounds = 0
+    ctx.d_rounds = 0
+    ctx.is_initialized = false
+}
+
+Context :: struct {
+    v0, v1, v2, v3: u64,    // State values
+    k0, k1:         u64,    // Split key
+    c_rounds:       int,    // Number of message rounds
+    d_rounds:       int,    // Number of finalization rounds
+    buf:            []byte, // Provided data
+    last_block:     int,    // Offset from the last block
+    is_initialized: bool,
+}
+
+_get_byte :: #force_inline proc "contextless" (byte_num: byte, into: u64) -> byte {
+    return byte(into >> (((~byte_num) & (size_of(u64) - 1)) << 3))
+}
+
+_collect_output :: #force_inline proc "contextless" (dst: []byte, hash: u64) {
+    dst[0] = _get_byte(7, hash)
+    dst[1] = _get_byte(6, hash)
+    dst[2] = _get_byte(5, hash)
+    dst[3] = _get_byte(4, hash)
+    dst[4] = _get_byte(3, hash)
+    dst[5] = _get_byte(2, hash)
+    dst[6] = _get_byte(1, hash)
+    dst[7] = _get_byte(0, hash)
+}
+
+_compress :: #force_inline proc "contextless" (ctx: ^Context) {
+    ctx.v0 += ctx.v1
+    ctx.v1  = util.ROTL64(ctx.v1, 13)
+    ctx.v1 ~= ctx.v0
+    ctx.v0  = util.ROTL64(ctx.v0, 32)
+    ctx.v2 += ctx.v3
+    ctx.v3  = util.ROTL64(ctx.v3, 16)
+    ctx.v3 ~= ctx.v2
+    ctx.v0 += ctx.v3
+    ctx.v3  = util.ROTL64(ctx.v3, 21)
+    ctx.v3 ~= ctx.v0
+    ctx.v2 += ctx.v1
+    ctx.v1  = util.ROTL64(ctx.v1, 17)
+    ctx.v1 ~= ctx.v2
+    ctx.v2  = util.ROTL64(ctx.v2, 32)
+}
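
A short usage sketch for the high-level API in this file (the `core:crypto/siphash` import path is assumed from the package name; the all-zero key is purely illustrative and must be 16 random bytes in real use):

    package example

    import "core:crypto/siphash"

    main :: proc() {
        message := "message"
        msg := transmute([]byte)message
        key: [siphash.KEY_SIZE]byte // illustration only; use a random 128-bit key

        tag := siphash.sum_bytes_2_4(msg, key[:])       // SipHash-2-4 as a u64
        assert(siphash.verify_u64_2_4(tag, msg, key[:])) // recomputes and compares
    }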

+ 33 - 10
core/crypto/sm3/sm3.odin

@@ -15,16 +15,22 @@ import "core:io"
 
 
 import "../util"
 import "../util"
 
 
+/*
+    High level API
+*/
+
+DIGEST_SIZE :: 32
+
 // hash_string will hash the given input and return the
 // hash_string will hash the given input and return the
 // computed hash
 // computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
     return hash_bytes(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes will hash the given input and return the
 // hash_bytes will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Sm3_Context
     ctx: Sm3_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -32,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Sm3_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Sm3_Context
     ctx: Sm3_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -53,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // hash_file will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
         return hash_stream(os.stream_from_handle(hd))
     } else {
     } else {
@@ -61,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
             return hash_bytes(buf[:]), ok
             return hash_bytes(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 }
 
 
 hash :: proc {
 hash :: proc {
@@ -69,6 +93,8 @@ hash :: proc {
     hash_file,
     hash_file,
     hash_bytes,
     hash_bytes,
     hash_string,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 }
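
A sketch of the new buffer-based path (import path `core:crypto/sm3` assumed from the file location):

    package example

    import "core:crypto/sm3"

    main :: proc() {
        digest: [sm3.DIGEST_SIZE]byte
        // Fills the caller-provided buffer rather than returning the array by value.
        sm3.hash_string_to_buffer("abc", digest[:])
    }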
 
 
/*
@@ -146,9 +172,6 @@ Sm3_Context :: struct {
    length:    u64,
}

-BLOCK_SIZE_IN_BYTES :: 64
-BLOCK_SIZE_IN_32    :: 16
-
IV := [8]u32 {
    0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
    0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,

+ 58 - 14
core/crypto/streebog/streebog.odin

@@ -19,16 +19,19 @@ import "../util"
    High level API
*/

+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_512 :: 64
+
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
    return hash_bytes_256(transmute([]byte)(data))
}

// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Streebog_Context
    ctx.is256 = true
    init(&ctx)
@@ -37,10 +40,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
    return hash
}

+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Streebog_Context
+    ctx.is256 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash[:])
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
    ctx: Streebog_Context
    ctx.is256 = true
    init(&ctx)
@@ -59,7 +81,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {

// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
    if !load_at_once {
        return hash_stream_256(os.stream_from_handle(hd))
    } else {
@@ -67,7 +89,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
            return hash_bytes_256(buf[:]), ok
        }
    }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
}

hash_256 :: proc {
@@ -75,18 +97,20 @@ hash_256 :: proc {
    hash_file_256,
    hash_bytes_256,
    hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
}

// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
    return hash_bytes_512(transmute([]byte)(data))
}

// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
    ctx: Streebog_Context
    init(&ctx)
    update(&ctx, data)
@@ -94,10 +118,28 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
    return hash
}

+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Streebog_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash[:])
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
    ctx: Streebog_Context
    init(&ctx)
    buf := make([]byte, 512)
@@ -115,7 +157,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {

// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
    if !load_at_once {
        return hash_stream_512(os.stream_from_handle(hd))
    } else {
@@ -123,7 +165,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
            return hash_bytes_512(buf[:]), ok
        }
    }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
}

hash_512 :: proc {
@@ -131,6 +173,8 @@ hash_512 :: proc {
    hash_file_512,
    hash_bytes_512,
    hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
}
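
A sketch showing how the two digest sizes are selected through the high-level procedures (import path `core:crypto/streebog` assumed from the file location):

    package example

    import "core:crypto/streebog"

    main :: proc() {
        h256 := streebog.hash_string_256("data") // [streebog.DIGEST_SIZE_256]byte
        h512 := streebog.hash_string_512("data") // [streebog.DIGEST_SIZE_512]byte
        _ = h256
        _ = h512
    }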
 
 
/*

+ 88 - 21
core/crypto/tiger/tiger.odin

@@ -19,16 +19,20 @@ import "../_tiger"
    High level API
*/

+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
    return hash_bytes_128(transmute([]byte)(data))
}

// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
    return hash
}

+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {

// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
    if !load_at_once {
        return hash_stream_128(os.stream_from_handle(hd))
    } else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
            return hash_bytes_128(buf[:]), ok
        }
    }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
}

hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
    hash_file_128,
    hash_bytes_128,
    hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
}

// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
    return hash_bytes_160(transmute([]byte)(data))
}

// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
    return hash
}

+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {

// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
    if !load_at_once {
        return hash_stream_160(os.stream_from_handle(hd))
    } else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
            return hash_bytes_160(buf[:]), ok
        }
    }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
}

hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
    hash_file_160,
    hash_bytes_160,
    hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
}

// hash_string_192 will hash the given input and return the
// computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
    return hash_bytes_192(transmute([]byte)(data))
}

// hash_bytes_192 will hash the given input and return the
// computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
-    hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+    hash: [DIGEST_SIZE_192]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
    return hash
}

+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
-    hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+    hash: [DIGEST_SIZE_192]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 1
    _tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {

// hash_file_192 will read the file provided by the given handle
// and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
    if !load_at_once {
        return hash_stream_192(os.stream_from_handle(hd))
    } else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
            return hash_bytes_192(buf[:]), ok
        }
    }
-    return [24]byte{}, false
+    return [DIGEST_SIZE_192]byte{}, false
}

hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
    hash_file_192,
    hash_bytes_192,
    hash_string_192,
+    hash_bytes_to_buffer_192,
+    hash_string_to_buffer_192,
}
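
A sketch of picking between the three digest lengths exposed above (import path `core:crypto/tiger` assumed from the file location):

    package example

    import "core:crypto/tiger"

    main :: proc() {
        h128 := tiger.hash_string_128("abc") // [tiger.DIGEST_SIZE_128]byte
        h160 := tiger.hash_string_160("abc") // [tiger.DIGEST_SIZE_160]byte
        h192 := tiger.hash_string_192("abc") // [tiger.DIGEST_SIZE_192]byte
        _ = h128
        _ = h160
        _ = h192
    }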
 
 
/*

+ 88 - 21
core/crypto/tiger2/tiger2.odin

@@ -19,16 +19,20 @@ import "../_tiger"
    High level API
*/

+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
    return hash_bytes_128(transmute([]byte)(data))
}

// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
    return hash
}

+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {

// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
    if !load_at_once {
        return hash_stream_128(os.stream_from_handle(hd))
    } else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
            return hash_bytes_128(buf[:]), ok
        }
    }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
}

hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
    hash_file_128,
    hash_bytes_128,
    hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
}

// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
    return hash_bytes_160(transmute([]byte)(data))
}

// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
    return hash
}

+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {

// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
    if !load_at_once {
        return hash_stream_160(os.stream_from_handle(hd))
    } else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
            return hash_bytes_160(buf[:]), ok
        }
    }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
}

hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
    hash_file_160,
    hash_bytes_160,
    hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
}

// hash_string_192 will hash the given input and return the
// computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
    return hash_bytes_192(transmute([]byte)(data))
}

// hash_bytes_192 will hash the given input and return the
// computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
-    hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+    hash: [DIGEST_SIZE_192]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
    return hash
}

+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
-    hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+    hash: [DIGEST_SIZE_192]byte
    ctx: _tiger.Tiger_Context
    ctx.ver = 2
    _tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {

// hash_file_192 will read the file provided by the given handle
// and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
    if !load_at_once {
        return hash_stream_192(os.stream_from_handle(hd))
    } else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
            return hash_bytes_192(buf[:]), ok
        }
    }
-    return [24]byte{}, false
+    return [DIGEST_SIZE_192]byte{}, false
}

hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
    hash_file_192,
    hash_bytes_192,
    hash_string_192,
+    hash_bytes_to_buffer_192,
+    hash_string_to_buffer_192,
}

/*

+ 29 - 7
core/crypto/whirlpool/whirlpool.odin

@@ -19,16 +19,18 @@ import "../util"
    High level API
*/

+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
    return hash_bytes(transmute([]byte)(data))
}

// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
-	hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+	hash: [DIGEST_SIZE]byte
	ctx: Whirlpool_Context
    // init(&ctx) No-op
    update(&ctx, data)
@@ -36,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
    return hash
}

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Whirlpool_Context
+    // init(&ctx) No-op
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
-	hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+	hash: [DIGEST_SIZE]byte
	ctx: Whirlpool_Context
	// init(&ctx) No-op
	buf := make([]byte, 512)
@@ -57,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {

// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
	if !load_at_once {
        return hash_stream(os.stream_from_handle(hd))
    } else {
@@ -65,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
            return hash_bytes(buf[:]), ok
        }
    }
-    return [64]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
}

hash :: proc {
@@ -73,6 +93,8 @@ hash :: proc {
    hash_file,
    hash_bytes,
    hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
}

/*

+ 126 - 0
core/crypto/x25519/x25519.odin

@@ -0,0 +1,126 @@
+package x25519
+
+import field "core:crypto/_fiat/field_curve25519"
+import "core:mem"
+
+SCALAR_SIZE :: 32
+POINT_SIZE :: 32
+
+_BASE_POINT: [32]byte = {9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+_scalar_bit :: #force_inline proc "contextless" (s: ^[32]byte, i: int) -> u8 {
+	if i < 0 {
+		return 0
+	}
+	return (s[i>>3] >> uint(i&7)) & 1
+}
+
+_scalarmult :: proc (out, scalar, point: ^[32]byte) {
+	// Montgomery pseudo-multiplication taken from Monocypher.
+
+	// computes the scalar product
+	x1: field.Tight_Field_Element = ---
+	field.fe_from_bytes(&x1, point)
+
+	// computes the actual scalar product (the result is in x2 and z2)
+	x2, x3, z2, z3: field.Tight_Field_Element =  ---, ---, ---, ---
+	t0, t1: field.Loose_Field_Element = ---, ---
+
+	// Montgomery ladder
+	// In projective coordinates, to avoid divisions: x = X / Z
+	// We don't care about the y coordinate, it's only 1 bit of information
+	field.fe_one(&x2) // "zero" point
+	field.fe_zero(&z2)
+	field.fe_set(&x3, &x1) // "one" point
+	field.fe_one(&z3)
+
+	swap: int
+	for pos := 255-1; pos >= 0; pos -= 1 {
+		// constant time conditional swap before ladder step
+		b := int(_scalar_bit(scalar, pos))
+		swap ~= b // xor trick avoids swapping at the end of the loop
+		field.fe_cond_swap(&x2, &x3, swap)
+		field.fe_cond_swap(&z2, &z3, swap)
+		swap = b // anticipates one last swap after the loop
+
+		// Montgomery ladder step: replaces (P2, P3) by (P2*2, P2+P3)
+		// with differential addition
+		//
+		// Note: This deliberately omits reductions after add/sub operations
+		// if the result is only ever used as the input to a mul/square since
+		// the implementations of those can deal with non-reduced inputs.
+		//
+		// fe_tighten_cast is only used to store a fully reduced
+		// output in a Loose_Field_Element, or to provide such a
+		// Loose_Field_Element as a Tight_Field_Element argument.
+		field.fe_sub(&t0, &x3, &z3)
+		field.fe_sub(&t1, &x2, &z2)
+		field.fe_add(field.fe_relax_cast(&x2), &x2, &z2) // x2 - unreduced
+		field.fe_add(field.fe_relax_cast(&z2), &x3, &z3) // z2 - unreduced
+		field.fe_carry_mul(&z3, &t0, field.fe_relax_cast(&x2))
+		field.fe_carry_mul(&z2, field.fe_relax_cast(&z2), &t1) // z2 - reduced
+		field.fe_carry_square(field.fe_tighten_cast(&t0), &t1) // t0 - reduced
+		field.fe_carry_square(field.fe_tighten_cast(&t1), field.fe_relax_cast(&x2)) // t1 - reduced
+		field.fe_add(field.fe_relax_cast(&x3), &z3, &z2) // x3 - unreduced
+		field.fe_sub(field.fe_relax_cast(&z2), &z3, &z2) // z2 - unreduced
+		field.fe_carry_mul(&x2, &t1, &t0) // x2 - reduced
+		field.fe_sub(&t1, field.fe_tighten_cast(&t1), field.fe_tighten_cast(&t0)) // safe - t1/t0 is reduced
+		field.fe_carry_square(&z2, field.fe_relax_cast(&z2)) // z2 - reduced
+		field.fe_carry_scmul_121666(&z3, &t1)
+		field.fe_carry_square(&x3, field.fe_relax_cast(&x3)) // x3 - reduced
+		field.fe_add(&t0, field.fe_tighten_cast(&t0), &z3) // safe - t0 is reduced
+		field.fe_carry_mul(&z3, field.fe_relax_cast(&x1), field.fe_relax_cast(&z2))
+		field.fe_carry_mul(&z2, &t1, &t0)
+	}
+	// last swap is necessary to compensate for the xor trick
+	// Note: after this swap, P3 == P2 + P1.
+	field.fe_cond_swap(&x2, &x3, swap)
+	field.fe_cond_swap(&z2, &z3, swap)
+
+	// normalises the coordinates: x == X / Z
+	field.fe_carry_inv(&z2, field.fe_relax_cast(&z2))
+	field.fe_carry_mul(&x2, field.fe_relax_cast(&x2), field.fe_relax_cast(&z2))
+	field.fe_to_bytes(out, &x2)
+
+	mem.zero_explicit(&x1, size_of(x1))
+	mem.zero_explicit(&x2, size_of(x2))
+	mem.zero_explicit(&x3, size_of(x3))
+	mem.zero_explicit(&z2, size_of(z2))
+	mem.zero_explicit(&z3, size_of(z3))
+	mem.zero_explicit(&t0, size_of(t0))
+	mem.zero_explicit(&t1, size_of(t1))
+}
+
+scalarmult :: proc (dst, scalar, point: []byte) {
+	if len(scalar) != SCALAR_SIZE {
+		panic("crypto/x25519: invalid scalar size")
+	}
+	if len(point) != POINT_SIZE {
+		panic("crypto/x25519: invalid point size")
+	}
+	if len(dst) != POINT_SIZE {
+		panic("crypto/x25519: invalid destination point size")
+	}
+
+	// "clamp" the scalar
+	e: [32]byte = ---
+	copy_slice(e[:], scalar)
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	p: [32]byte = ---
+	copy_slice(p[:], point)
+
+	d: [32]byte = ---
+	_scalarmult(&d, &e, &p)
+	copy_slice(dst, d[:])
+
+	mem.zero_explicit(&e, size_of(e))
+	mem.zero_explicit(&d, size_of(d))
+}
+
+scalarmult_basepoint :: proc (dst, scalar: []byte) {
+	// TODO/perf: Switch to using a precomputed table.
+	scalarmult(dst, scalar, _BASE_POINT[:])
+}
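
A minimal Diffie-Hellman sketch over the procedures above (the fixed scalars are purely illustrative; real code must use 32 uniformly random bytes per party):

    package example

    import "core:crypto/x25519"

    main :: proc() {
        alice_priv, bob_priv: [x25519.SCALAR_SIZE]byte
        alice_priv[0], bob_priv[0] = 1, 2 // placeholders, never do this in practice

        alice_pub, bob_pub: [x25519.POINT_SIZE]byte
        x25519.scalarmult_basepoint(alice_pub[:], alice_priv[:])
        x25519.scalarmult_basepoint(bob_pub[:], bob_priv[:])

        // Both parties arrive at the same shared secret.
        shared_a, shared_b: [x25519.POINT_SIZE]byte
        x25519.scalarmult(shared_a[:], alice_priv[:], bob_pub[:])
        x25519.scalarmult(shared_b[:], bob_priv[:], alice_pub[:])
    }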

+ 12 - 0
core/dynlib/lib.odin

@@ -1,3 +1,15 @@
package dynlib

Library :: distinct rawptr
+
+load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
+	return _load_library(path, global_symbols)
+}
+
+unload_library :: proc(library: Library) -> bool {
+	return _unload_library(library)
+}
+
+symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) #optional_ok {
+	return _symbol_address(library, symbol)
+}
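
A usage sketch for the new platform-independent wrappers ("libm.so.6" and "cos" are illustrative values for a Unix-like system, not part of this diff):

    package example

    import "core:dynlib"
    import "core:fmt"

    main :: proc() {
        lib, ok := dynlib.load_library("libm.so.6")
        if !ok {
            return
        }
        defer dynlib.unload_library(lib)

        if ptr, found := dynlib.symbol_address(lib, "cos"); found {
            fmt.println("cos is at", ptr)
        }
    }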

+ 15 - 14
core/dynlib/lib_unix.odin

@@ -1,23 +1,24 @@
-// +build linux, darwin, freebsd
+//+build linux, darwin, freebsd, openbsd
+//+private
package dynlib

import "core:os"

-load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
-    flags := os.RTLD_NOW
-    if global_symbols {
-    	flags |= os.RTLD_GLOBAL
-    }
-    lib := os.dlopen(path, flags)
-    return Library(lib), lib != nil
+_load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
+	flags := os.RTLD_NOW
+	if global_symbols {
+		flags |= os.RTLD_GLOBAL
+	}
+	lib := os.dlopen(path, flags)
+	return Library(lib), lib != nil
}

-unload_library :: proc(library: Library) {
-    os.dlclose(rawptr(library))
+_unload_library :: proc(library: Library) -> bool {
+	return os.dlclose(rawptr(library))
}

-symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) {
-    ptr = os.dlsym(rawptr(library), symbol)
-    found = ptr != nil
-    return
+_symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) {
+	ptr = os.dlsym(rawptr(library), symbol)
+	found = ptr != nil
+	return
}

+ 5 - 4
core/dynlib/lib_windows.odin

@@ -1,10 +1,11 @@
-// +build windows
+//+build windows
+//+private
package dynlib

import win32 "core:sys/windows"
import "core:strings"

-load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
+_load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
	// NOTE(bill): 'global_symbols' is here only for consistency with POSIX which has RTLD_GLOBAL

	wide_path := win32.utf8_to_wstring(path, context.temp_allocator)
@@ -12,12 +13,12 @@ load_library :: proc(path: string, global_symbols := false) -> (Library, bool) {
	return handle, handle != nil
}

-unload_library :: proc(library: Library) -> bool {
+_unload_library :: proc(library: Library) -> bool {
	ok := win32.FreeLibrary(cast(win32.HMODULE)library)
	return bool(ok)
}

-symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) {
+_symbol_address :: proc(library: Library, symbol: string) -> (ptr: rawptr, found: bool) {
	c_str := strings.clone_to_cstring(symbol, context.temp_allocator)
	ptr = win32.GetProcAddress(cast(win32.HMODULE)library, c_str)
	found = ptr != nil

+ 65 - 21
core/encoding/csv/reader.odin

@@ -34,6 +34,10 @@ Reader :: struct {
	// If lazy_quotes is true, a quote may appear in an unquoted field and a non-doubled quote may appear in a quoted field
	lazy_quotes: bool,

+	// multiline_fields, when set to true, will treat a field starting with a " as a multiline string
+	// therefore, instead of reading until the next \n, it'll read until the next "
+	multiline_fields: bool,
+
	// reuse_record controls whether calls to 'read' may return a slice using the backing buffer
	// for performance
	// By default, each call to 'read' returns a newly allocated slice
@@ -194,32 +198,72 @@ is_valid_delim :: proc(r: rune) -> bool {
@private
_read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.allocator) -> ([]string, Error) {
	read_line :: proc(r: ^Reader) -> ([]byte, io.Error) {
-		line, err := bufio.reader_read_slice(&r.r, '\n')
-		if err == .Buffer_Full {
-			clear(&r.raw_buffer)
-			append(&r.raw_buffer, ..line)
-			for err == .Buffer_Full {
-				line, err = bufio.reader_read_slice(&r.r, '\n')
+		if !r.multiline_fields {
+			line, err := bufio.reader_read_slice(&r.r, '\n')
+			if err == .Buffer_Full {
+				clear(&r.raw_buffer)
 				append(&r.raw_buffer, ..line)
+				for err == .Buffer_Full {
+					line, err = bufio.reader_read_slice(&r.r, '\n')
+					append(&r.raw_buffer, ..line)
+				}
+				line = r.raw_buffer[:]
 			}
-			line = r.raw_buffer[:]
-		}
-		if len(line) > 0 && err == .EOF {
-			err = nil
-			if line[len(line)-1] == '\r' {
-				line = line[:len(line)-1]
+			if len(line) > 0 && err == .EOF {
+				err = nil
+				if line[len(line)-1] == '\r' {
+					line = line[:len(line)-1]
+				}
 			}
-		}
-		r.line_count += 1
+			r.line_count += 1

-		// normalize \r\n to \n
-		n := len(line)
-		for n >= 2 && string(line[n-2:]) == "\r\n" {
-			line[n-2] = '\n'
-			line = line[:n-1]
-		}
+			// normalize \r\n to \n
+			n := len(line)
+			for n >= 2 && string(line[n-2:]) == "\r\n" {
+				line[n-2] = '\n'
+				line = line[:n-1]
+			}
+			return line, err
+
+		} else {
+			// Reading a "line" that can possibly contain multiline fields.
+			// Unfortunately, this means we need to read a character at a time.

-		return line, err
+			err:       io.Error
+			cur:       rune
+			is_quoted: bool
+
+			field_length := 0
+
+			clear(&r.raw_buffer)
+
+			read_loop: for err == .None {
+				cur, _, err = bufio.reader_read_rune(&r.r)
+
+				if err != .None { break read_loop }
+
+				switch cur {
+				case '"':
+					is_quoted = field_length == 0
+					field_length += 1
+
+				case '\n', '\r':
+					if !is_quoted { break read_loop }
+
+				case r.comma:
+					field_length = 0
+
+				case:
+					field_length += 1
+				}
+
+				rune_buf, rune_len := utf8.encode_rune(cur)
+				append(&r.raw_buffer, ..rune_buf[:rune_len])
+			}
+
+			return r.raw_buffer[:], err
+		}
+		unreachable()
	}

	length_newline :: proc(b: []byte) -> int {
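
A sketch of the new option in use; `reader_init_with_string`, `read`, and `reader_destroy` are assumed helpers from the package's public API and are not part of this diff:

    package example

    import "core:encoding/csv"
    import "core:fmt"

    main :: proc() {
        input := "title,body\n\"post\",\"first line\nsecond line\"\n"
        r: csv.Reader
        csv.reader_init_with_string(&r, input) // assumed init helper
        r.multiline_fields = true              // quoted fields may now span lines
        defer csv.reader_destroy(&r)           // assumed cleanup helper

        record, err := csv.read(&r) // assumed read procedure
        if err == nil {
            fmt.println(record) // ["post", "first line\nsecond line"]
        }
    }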

+ 23 - 0
core/encoding/endian/doc.odin

@@ -0,0 +1,23 @@
+/*
+    Package endian implements a simple translation between bytes and numbers with
+    specific endian encodings.
+
+    buf: [100]u8
+    put_u16(buf[:], .Little, 16) or_return
+
+    You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
+    put_u16(buf[:], 16) or_return
+
+    The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
+    each of their fields, or use a header field which specifies it for the entire file.
+
+    e.g. a file which specifies it at the top for all fields could do this:
+    file_order := .Little if buf[0] == 0 else .Big
+    field := get_u16(buf[1:], file_order) or_return
+
+    If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
+    be that your future self or someone else.
+
+    field := get_u16(buf[:], .Big) or_return
+*/
+package encoding_endian
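
A round-trip sketch for the package; the `put_u16`/`get_u16` signatures are inferred from the `or_return` usage in the doc comment above and are an assumption:

    package example

    import "core:encoding/endian"

    main :: proc() {
        buf: [2]u8
        _ = endian.put_u16(buf[:], .Big, 0xBEEF) // buf is now {0xBE, 0xEF}
        v, ok := endian.get_u16(buf[:], .Big)    // v == 0xBEEF
        assert(ok && v == 0xBEEF)
    }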

Some files were not shown because too many files changed in this diff