
Merge branch 'master' into xml

Jeroen van Rijn 3 years ago
parent
commit
c4e0d1efa1
100 changed files with 6242 additions and 2562 deletions
  1. .github/FUNDING.yml (+1, -0)
  2. .github/workflows/ci.yml (+63, -13)
  3. .github/workflows/nightly.yml (+7, -7)
  4. .gitignore (+5, -0)
  5. LICENSE (+1, -1)
  6. Makefile (+6, -45)
  7. README.md (+10, -8)
  8. build_odin.sh (+150, -0)
  9. core/bufio/scanner.odin (+3, -6)
  10. core/builtin/builtin.odin (+1, -1)
  11. core/bytes/bytes.odin (+13, -38)
  12. core/c/c.odin (+6, -4)
  13. core/c/frontend/preprocessor/preprocess.odin (+1, -1)
  14. core/c/libc/complex.odin (+2, -2)
  15. core/c/libc/ctype.odin (+2, -2)
  16. core/c/libc/errno.odin (+18, -5)
  17. core/c/libc/math.odin (+2, -2)
  18. core/c/libc/setjmp.odin (+3, -3)
  19. core/c/libc/signal.odin (+5, -5)
  20. core/c/libc/stdatomic.odin (+111, -146)
  21. core/c/libc/stdio.odin (+56, -6)
  22. core/c/libc/stdlib.odin (+5, -5)
  23. core/c/libc/string.odin (+2, -2)
  24. core/c/libc/threads.odin (+3, -3)
  25. core/c/libc/time.odin (+10, -5)
  26. core/c/libc/types.odin (+2, -0)
  27. core/c/libc/uchar.odin (+2, -2)
  28. core/c/libc/wchar.odin (+2, -2)
  29. core/c/libc/wctype.odin (+14, -7)
  30. core/compress/common.odin (+16, -6)
  31. core/compress/gzip/gzip.odin (+4, -3)
  32. core/compress/shoco/model.odin (+148, -0)
  33. core/compress/shoco/shoco.odin (+318, -0)
  34. core/compress/zlib/zlib.odin (+43, -40)
  35. core/container/array.odin (+0, -216)
  36. core/container/bit_array/bit_array.odin (+239, -0)
  37. core/container/bit_array/doc.odin (+53, -0)
  38. core/container/bloom_filter.odin (+0, -80)
  39. core/container/lru/lru_cache.odin (+201, -0)
  40. core/container/map.odin (+0, -377)
  41. core/container/priority_queue.odin (+0, -121)
  42. core/container/priority_queue/priority_queue.odin (+143, -0)
  43. core/container/queue.odin (+0, -175)
  44. core/container/queue/queue.odin (+209, -0)
  45. core/container/ring.odin (+0, -74)
  46. core/container/set.odin (+0, -240)
  47. core/container/small_array.odin (+0, -95)
  48. core/container/small_array/small_array.odin (+117, -0)
  49. core/container/topological_sort/topological_sort.odin (+98, -0)
  50. core/crypto/README.md (+7, -1)
  51. core/crypto/_fiat/field_poly1305/field.odin (+1, -1)
  52. core/crypto/_sha3/_sha3.odin (+2, -2)
  53. core/crypto/blake/blake.odin (+117, -28)
  54. core/crypto/blake2b/blake2b.odin (+33, -7)
  55. core/crypto/blake2s/blake2s.odin (+33, -7)
  56. core/crypto/chacha20/chacha20.odin (+1, -1)
  57. core/crypto/gost/gost.odin (+29, -7)
  58. core/crypto/groestl/groestl.odin (+117, -28)
  59. core/crypto/haval/haval.odin (+400, -95)
  60. core/crypto/jh/jh.odin (+117, -28)
  61. core/crypto/keccak/keccak.odin (+130, -37)
  62. core/crypto/md2/md2.odin (+43, -21)
  63. core/crypto/md4/md4.odin (+31, -9)
  64. core/crypto/md5/md5.odin (+31, -9)
  65. core/crypto/rand_generic.odin (+1, -1)
  66. core/crypto/rand_openbsd.odin (+12, -0)
  67. core/crypto/rand_windows.odin (+23, -0)
  68. core/crypto/ripemd/ripemd.odin (+113, -28)
  69. core/crypto/sha1/sha1.odin (+30, -7)
  70. core/crypto/sha2/sha2.odin (+117, -28)
  71. core/crypto/sha3/sha3.odin (+125, -36)
  72. core/crypto/shake/shake.odin (+66, -19)
  73. core/crypto/siphash/siphash.odin (+335, -0)
  74. core/crypto/sm3/sm3.odin (+33, -10)
  75. core/crypto/streebog/streebog.odin (+58, -14)
  76. core/crypto/tiger/tiger.odin (+88, -21)
  77. core/crypto/tiger2/tiger2.odin (+88, -21)
  78. core/crypto/whirlpool/whirlpool.odin (+29, -7)
  79. core/dynlib/lib_unix.odin (+1, -1)
  80. core/encoding/hxa/doc.odin (+6, -6)
  81. core/encoding/hxa/read.odin (+25, -4)
  82. core/encoding/hxa/write.odin (+3, -3)
  83. core/encoding/json/marshal.odin (+6, -5)
  84. core/encoding/json/parser.odin (+6, -0)
  85. core/encoding/json/unmarshal.odin (+25, -25)
  86. core/encoding/varint/doc.odin (+28, -0)
  87. core/encoding/varint/leb128.odin (+165, -0)
  88. core/fmt/doc.odin (+1, -1)
  89. core/fmt/fmt.odin (+191, -160)
  90. core/fmt/fmt_js.odin (+6, -1)
  91. core/fmt/fmt_os.odin (+9, -1)
  92. core/hash/hash.odin (+18, -1)
  93. core/hash/xxhash/streaming.odin (+1, -1)
  94. core/image/common.odin (+829, -20)
  95. core/image/png/example.odin (+2, -2)
  96. core/image/png/helpers.odin (+10, -11)
  97. core/image/png/png.odin (+45, -31)
  98. core/image/qoi/qoi.odin (+408, -0)
  99. core/image/tga/tga.odin (+103, -0)
  100. core/intrinsics/intrinsics.odin (+49, -68)

+ 1 - 0
.github/FUNDING.yml

@@ -1,3 +1,4 @@
 # These are supported funding model platforms

+github: odin-lang
 patreon: gingerbill

+ 63 - 13
.github/workflows/ci.yml

@@ -7,9 +7,9 @@ jobs:
    steps:
      - uses: actions/checkout@v1
      - name: Download LLVM, botan
-        run: sudo apt-get install llvm-11 clang-11 llvm libbotan-2-dev botan
+        run: sudo apt-get install llvm-11 clang-11 libbotan-2-dev botan
      - name: build odin
-        run: make release
+        run: ./build_odin.sh release
      - name: Odin version
        run: ./odin version
        timeout-minutes: 1
@@ -17,13 +17,16 @@ jobs:
        run: ./odin report
        timeout-minutes: 1
      - name: Odin check
-        run: ./odin check examples/demo/demo.odin -vet
+        run: ./odin check examples/demo -vet
        timeout-minutes: 10
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
        timeout-minutes: 10
      - name: Odin run -debug
-        run: ./odin run examples/demo/demo.odin -debug
+        run: ./odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        run: ./odin check examples/all -strict-style
        timeout-minutes: 10
      - name: Core library tests
        run: |
@@ -35,6 +38,20 @@ jobs:
          cd tests/vendor
          make
        timeout-minutes: 10
+      - name: Odin issues tests
+        run: |
+          cd tests/issues
+          ./run.sh
+        timeout-minutes: 10
+      - name: Odin check examples/all for Linux i386
+        run: ./odin check examples/all -vet -strict-style -target:linux_i386
+        timeout-minutes: 10
+      - name: Odin check examples/all for FreeBSD amd64
+        run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
+        timeout-minutes: 10
+      - name: Odin check examples/all for OpenBSD amd64
+        run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
+        timeout-minutes: 10
  build_macOS:
    runs-on: macos-latest
    steps:
@@ -46,7 +63,7 @@ jobs:
          TMP_PATH=$(xcrun --show-sdk-path)/user/include
          echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
      - name: build odin
-        run: make release
+        run: ./build_odin.sh release
      - name: Odin version
        run: ./odin version
        timeout-minutes: 1
@@ -54,13 +71,16 @@ jobs:
        run: ./odin report
        timeout-minutes: 1
      - name: Odin check
-        run: ./odin check examples/demo/demo.odin -vet
+        run: ./odin check examples/demo -vet
        timeout-minutes: 10
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
        timeout-minutes: 10
      - name: Odin run -debug
-        run: ./odin run examples/demo/demo.odin -debug
+        run: ./odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        run: ./odin check examples/all -strict-style
        timeout-minutes: 10
      - name: Core library tests
        run: |
@@ -72,8 +92,19 @@ jobs:
          cd tests/vendor
          make
        timeout-minutes: 10
+      - name: Odin issues tests
+        run: |
+          cd tests/issues
+          ./run.sh
+        timeout-minutes: 10
+      - name: Odin check examples/all for Darwin arm64
+        run: ./odin check examples/all -vet -strict-style -target:darwin_arm64
+        timeout-minutes: 10
+      - name: Odin check examples/all for Linux arm64
+        run: ./odin check examples/all -vet -strict-style -target:linux_arm64
+        timeout-minutes: 10
  build_windows:
-    runs-on: windows-latest
+    runs-on: windows-2019
    steps:
      - uses: actions/checkout@v1
      - name: build Odin
@@ -91,19 +122,25 @@ jobs:
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin check examples/demo/demo.odin -vet
+          odin check examples/demo -vet
        timeout-minutes: 10
      - name: Odin run
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin
+          odin run examples/demo
        timeout-minutes: 10
      - name: Odin run -debug
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin -debug
+          odin run examples/demo -debug
+        timeout-minutes: 10
+      - name: Odin check examples/all
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          odin check examples/all -strict-style
        timeout-minutes: 10
      - name: Core library tests
        shell: cmd
@@ -126,3 +163,16 @@ jobs:
          cd tests\core\math\big
          call build.bat
        timeout-minutes: 10
+      - name: Odin issues tests
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          cd tests\issues
+          call run.bat
+        timeout-minutes: 10
+      - name: Odin check examples/all for Windows 32bits
+        shell: cmd
+        run: |
+          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
+          odin check examples/all -strict-style -target:windows_i386
+        timeout-minutes: 10

+ 7 - 7
.github/workflows/nightly.yml

@@ -7,7 +7,7 @@ on:

 jobs:
   build_windows:
-    runs-on: windows-latest
+    runs-on: windows-2019
    steps:
      - uses: actions/checkout@v1
      - name: build Odin
@@ -19,7 +19,7 @@ jobs:
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat
-          odin run examples/demo/demo.odin
+          odin run examples/demo
      - name: Copy artifacts
        run: |
          rm bin/llvm/windows/LLVM-C.lib
@@ -41,11 +41,11 @@ jobs:
    steps:
      - uses: actions/checkout@v1
      - name: (Linux) Download LLVM
-        run: sudo apt-get install llvm-11 clang-11 llvm
+        run: sudo apt-get install llvm-11 clang-11
      - name: build odin
        run: make nightly
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
      - name: Copy artifacts
        run: |
          mkdir dist
@@ -72,7 +72,7 @@ jobs:
      - name: build odin
        run: make nightly
      - name: Odin run
-        run: ./odin run examples/demo/demo.odin
+        run: ./odin run examples/demo
      - name: Copy artifacts
        run: |
          mkdir dist
@@ -129,7 +129,7 @@ jobs:
        run: |
          echo Authorizing B2 account
          b2 authorize-account "$APPID" "$APPKEY"
-          
+
          echo Uploading artifcates to B2
          chmod +x ./ci/upload_create_nightly.sh
          ./ci/upload_create_nightly.sh "$BUCKET" windows-amd64 windows_artifacts/
@@ -141,7 +141,7 @@ jobs:

           echo Creating nightly.json
           python3 ci/create_nightly_json.py "$BUCKET" > nightly.json
-          
+
          echo Uploading nightly.json
          b2 upload-file "$BUCKET" nightly.json nightly.json


+ 5 - 0
.gitignore

@@ -7,6 +7,9 @@
 # User-specific files (MonoDevelop/Xamarin Studio)
 *.userprefs

+# For macOS
+.DS_Store
+
 # Build results
 [Dd]ebug/
 [Dd]ebugPublic/
@@ -276,3 +279,5 @@ shared/
 *.ll

 *.sublime-workspace
+examples/bug/
+build.sh

+ 1 - 1
LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2016-2021 Ginger Bill. All rights reserved.
+Copyright (c) 2016-2022 Ginger Bill. All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

+ 6 - 45
Makefile

@@ -1,58 +1,19 @@
-GIT_SHA=$(shell git rev-parse --short HEAD)
-DISABLED_WARNINGS=-Wno-switch -Wno-macro-redefined -Wno-unused-value
-LDFLAGS=-pthread -ldl -lm -lstdc++
-CFLAGS=-std=c++14 -DGIT_SHA=\"$(GIT_SHA)\"
-CFLAGS:=$(CFLAGS) -DODIN_VERSION_RAW=\"dev-$(shell date +"%Y-%m")\"
-CC=clang
-
-OS=$(shell uname)
-
-ifeq ($(OS), Darwin)
-    LLVM_CONFIG=llvm-config
-    ifneq ($(shell llvm-config --version | grep '^11\.'),)
-        LLVM_CONFIG=llvm-config
-    else
-        $(error "Requirement: llvm-config must be version 11")
-    endif
-
-    LDFLAGS:=$(LDFLAGS) -liconv
-    CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
-    LDFLAGS:=$(LDFLAGS) -lLLVM-C
-endif
-ifeq ($(OS), Linux)
-    LLVM_CONFIG=llvm-config-11
-    ifneq ($(shell which llvm-config-11 2>/dev/null),)
-        LLVM_CONFIG=llvm-config-11
-    else ifneq ($(shell which llvm-config-11-64 2>/dev/null),)
-        LLVM_CONFIG=llvm-config-11-64
-    else
-        ifneq ($(shell llvm-config --version | grep '^11\.'),)
-            LLVM_CONFIG=llvm-config
-        else
-            $(error "Requirement: llvm-config must be version 11")
-        endif
-    endif
-
-    CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
-    LDFLAGS:=$(LDFLAGS) $(shell $(LLVM_CONFIG) --libs core native --system-libs)
-endif
-
-all: debug demo
+all: debug

 demo:
-	./odin run examples/demo/demo.odin
+	./odin run examples/demo/demo.odin -file

 report:
 	./odin report

 debug:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -g $(LDFLAGS) -o odin
+	./build_odin.sh debug

 release:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -O3 $(LDFLAGS) -o odin
+	./build_odin.sh release

 release_native:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -O3 -march=native $(LDFLAGS) -o odin
+	./build_odin.sh release-native

 nightly:
-	$(CC) src/main.cpp src/libtommath.cpp $(DISABLED_WARNINGS) $(CFLAGS) -DNIGHTLY -O3 $(LDFLAGS) -o odin
+	./build_odin.sh nightly

+ 10 - 8
README.md

@@ -11,7 +11,7 @@
         <img src="https://img.shields.io/badge/platforms-Windows%20|%20Linux%20|%20macOS-green.svg">
     </a>
     <br>
-    <a href="https://discord.gg/hnwN2Rj">
+    <a href="https://discord.gg/odinlang">
         <img src="https://img.shields.io/discord/568138951836172421?logo=discord">
     </a>
     <a href="https://github.com/odin-lang/odin/actions">
@@ -58,6 +58,10 @@ main :: proc() {

 Instructions for downloading and installing the Odin compiler and libraries.

+#### [Nightly Builds](https://odin-lang.org/docs/nightly/)
+
+Get the latest nightly builds of Odin.
+
 ### Learning Odin

 #### [Overview of Odin](https://odin-lang.org/docs/overview)
@@ -68,6 +72,10 @@ An overview of the Odin programming language.

 Answers to common questions about Odin.

+#### [Packages](https://pkg.odin-lang.org/)
+
+Documentation for all the official packages part of the [core](https://pkg.odin-lang.org/core/) and [vendor](https://pkg.odin-lang.org/vendor/) library collections.
+
 #### [The Odin Wiki](https://github.com/odin-lang/Odin/wiki)

 A wiki maintained by the Odin community.
@@ -76,15 +84,9 @@ A wiki maintained by the Odin community.

 Get live support and talk with other odiners on the Odin Discord.

-### References
-
-#### [Language Specification](https://odin-lang.org/docs/spec/)
-
-The official Odin Language specification.
-
 ### Articles

-#### [The Odin Blog](https://odin-lang.org/blog)
+#### [The Odin Blog](https://odin-lang.org/news/)

 The official blog of the Odin programming language, featuring announcements, news, and in-depth articles by the Odin team and guests.


+ 150 - 0
build_odin.sh

@@ -0,0 +1,150 @@
+#!/bin/bash
+set -eu
+
+GIT_SHA=$(git rev-parse --short HEAD)
+DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
+LDFLAGS="-pthread -lm -lstdc++"
+CFLAGS="-std=c++14 -DGIT_SHA=\"$GIT_SHA\""
+CFLAGS="$CFLAGS -DODIN_VERSION_RAW=\"dev-$(date +"%Y-%m")\""
+CC=clang
+OS=$(uname)
+
+panic() {
+	printf "%s\n" "$1"
+	exit 1
+}
+
+version() { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
+
+config_darwin() {
+	ARCH=$(uname -m)
+	LLVM_CONFIG=llvm-config
+
+	# allow for arm only llvm's with version 13
+	if [ ARCH == arm64 ]; then
+		MIN_LLVM_VERSION=("13.0.0")
+	else
+		# allow for x86 / amd64 all llvm versions begining from 11
+		MIN_LLVM_VERSION=("11.1.0")
+	fi
+
+	if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
+		if [ ARCH == arm64 ]; then
+			panic "Requirement: llvm-config must be base version 13 for arm64"
+		else
+			panic "Requirement: llvm-config must be base version greater than 11 for amd64/x86"
+		fi
+	fi
+
+	LDFLAGS="$LDFLAGS -liconv -ldl"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS -lLLVM-C"
+}
+
+config_freebsd() {
+	LLVM_CONFIG=/usr/local/bin/llvm-config11
+
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+config_openbsd() {
+	LLVM_CONFIG=/usr/local/bin/llvm-config
+
+	LDFLAGS="$LDFLAGS -liconv"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+config_linux() {
+	if which llvm-config > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config
+	elif which llvm-config-11 > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config-11
+	elif which llvm-config-11-64 > /dev/null 2>&1; then
+		LLVM_CONFIG=llvm-config-11-64
+	else
+		panic "Unable to find LLVM-config"
+	fi
+
+	MIN_LLVM_VERSION=("11.0.0")
+	if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
+		echo "Tried to use " $(which $LLVM_CONFIG) "version" $($LLVM_CONFIG --version)
+		panic "Requirement: llvm-config must be base version greater than 11"
+	fi
+
+	LDFLAGS="$LDFLAGS -ldl"
+	CFLAGS="$CFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
+	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
+}
+
+build_odin() {
+	case $1 in
+	debug)
+		EXTRAFLAGS="-g"
+		;;
+	release)
+		EXTRAFLAGS="-O3"
+		;;
+	release-native)
+		EXTRAFLAGS="-O3 -march=native"
+		;;
+	nightly)
+		EXTRAFLAGS="-DNIGHTLY -O3"
+		;;
+	*)
+		panic "Build mode unsupported!"
+	esac
+
+	set -x
+	$CC src/main.cpp src/libtommath.cpp $DISABLED_WARNINGS $CFLAGS $EXTRAFLAGS $LDFLAGS -o odin
+	set +x
+}
+
+run_demo() {
+	./odin run examples/demo/demo.odin -file
+}
+
+case $OS in
+Linux)
+	config_linux
+	;;
+Darwin)
+	config_darwin
+	;;
+OpenBSD)
+	config_openbsd
+	;;
+FreeBSD)
+	config_freebsd
+	;;
+*)
+	panic "Platform unsupported!"
+esac
+
+if [[ $# -eq 0 ]]; then
+	build_odin debug
+	run_demo
+	exit 0
+fi
+
+if [[ $# -eq 1 ]]; then
+	case $1 in
+	report)
+		if [[ ! -f "./odin" ]]; then
+			build_odin debug
+		fi
+
+		./odin report
+		exit 0
+		;;
+	*)
+		build_odin $1
+		;;
+	esac
+
+	run_demo
+	exit 0
+else
+	panic "Too many arguments!"
+fi

+ 3 - 6
core/bufio/scanner.odin

@@ -8,6 +8,7 @@ import "core:intrinsics"

 // Extra errors returns by scanning procedures
 Scanner_Extra_Error :: enum i32 {
+	None,
 	Negative_Advance,
 	Advanced_Too_Far,
 	Bad_Read_Count,
@@ -15,7 +16,7 @@ Scanner_Extra_Error :: enum i32 {
 	Too_Short,
 }

-Scanner_Error :: union {
+Scanner_Error :: union #shared_nil {
 	io.Error,
 	Scanner_Extra_Error,
 }
@@ -68,7 +69,7 @@ scanner_destroy :: proc(s: ^Scanner) {
 // Returns the first non-EOF error that was encounted by the scanner
 scanner_error :: proc(s: ^Scanner) -> Scanner_Error {
 	switch s._err {
-	case .EOF, .None:
+	case .EOF, nil:
 		return nil
 	}
 	return s._err
@@ -93,10 +94,6 @@ scanner_text :: proc(s: ^Scanner) -> string {
 // scanner_scan advances the scanner
 scanner_scan :: proc(s: ^Scanner) -> bool {
 	set_err :: proc(s: ^Scanner, err: Scanner_Error) {
-		err := err
-		if err == .None {
-			err = nil
-		}
 		switch s._err {
 		case nil, .EOF:
 			s._err = err

+ 1 - 1
core/builtin/builtin.odin

@@ -2,7 +2,7 @@
 package builtin

 nil   :: nil;
-false :: 0!==0;
+false :: 0!=0;
 true  :: 0==0;

 ODIN_OS      :: ODIN_OS;

+ 13 - 38
core/bytes/bytes.odin

@@ -5,9 +5,8 @@ import "core:unicode"
 import "core:unicode/utf8"

 clone :: proc(s: []byte, allocator := context.allocator, loc := #caller_location) -> []byte {
-	c := make([]byte, len(s)+1, allocator, loc)
+	c := make([]byte, len(s), allocator, loc)
 	copy(c, s)
-	c[len(s)] = 0
 	return c[:len(s)]
 }

@@ -219,61 +218,37 @@ split_after_n :: proc(s, sep: []byte, n: int, allocator := context.allocator) ->


 @private
-_split_iterator :: proc(s: ^[]byte, sep: []byte, sep_save, n: int) -> (res: []byte, ok: bool) {
-	s, n := s, n
-
-	if n == 0 {
-		return
-	}
-
-	if sep == nil {
+_split_iterator :: proc(s: ^[]byte, sep: []byte, sep_save: int) -> (res: []byte, ok: bool) {
+	if len(sep) == 0 {
 		res = s[:]
 		ok = true
 		s^ = s[len(s):]
 		return
 	}

-	if n < 0 {
-		n = count(s^, sep) + 1
-	}
-
-	n -= 1
-
-	i := 0
-	for ; i < n; i += 1 {
-		m := index(s^, sep)
-		if m < 0 {
-			break
-		}
+	m := index(s^, sep)
+	if m < 0 {
+		// not found
+		res = s[:]
+		ok = len(res) != 0
+		s^ = s[len(s):]
+	} else {
 		res = s[:m+sep_save]
 		ok = true
 		s^ = s[m+len(sep):]
-		return
 	}
-	res = s[:]
-	ok = res != nil
-	s^ = s[len(s):]
 	return
 }


 split_iterator :: proc(s: ^[]byte, sep: []byte) -> ([]byte, bool) {
-	return _split_iterator(s, sep, 0, -1)
-}
-
-split_n_iterator :: proc(s: ^[]byte, sep: []byte, n: int) -> ([]byte, bool) {
-	return _split_iterator(s, sep, 0, n)
+	return _split_iterator(s, sep, 0)
 }

 split_after_iterator :: proc(s: ^[]byte, sep: []byte) -> ([]byte, bool) {
-	return _split_iterator(s, sep, len(sep), -1)
+	return _split_iterator(s, sep, len(sep))
 }

-split_after_n_iterator :: proc(s: ^[]byte, sep: []byte, n: int) -> ([]byte, bool) {
-	return _split_iterator(s, sep, len(sep), n)
-}
-
-

 index_byte :: proc(s: []byte, c: byte) -> int {
 	for i := 0; i < len(s); i += 1 {
@@ -1143,7 +1118,7 @@ fields_proc :: proc(s: []byte, f: proc(rune) -> bool, allocator := context.alloc
 	}

 	if start >= 0 {
-		append(&subslices, s[start : end])
+		append(&subslices, s[start : len(s)])
 	}

 	return subslices[:]

+ 6 - 4
core/c/c.odin

@@ -3,22 +3,24 @@ package c
 import builtin "core:builtin"

 char           :: builtin.u8  // assuming -funsigned-char
+
+schar          :: builtin.i8
 short          :: builtin.i16
 int            :: builtin.i32
-long           :: builtin.i32 when (ODIN_OS == "windows" || size_of(builtin.rawptr) == 4) else builtin.i64
+long           :: builtin.i32 when (ODIN_OS == .Windows || size_of(builtin.rawptr) == 4) else builtin.i64
 longlong       :: builtin.i64

 uchar          :: builtin.u8
 ushort         :: builtin.u16
 uint           :: builtin.u32
-ulong          :: builtin.u32 when (ODIN_OS == "windows" || size_of(builtin.rawptr) == 4) else builtin.u64
+ulong          :: builtin.u32 when (ODIN_OS == .Windows || size_of(builtin.rawptr) == 4) else builtin.u64
 ulonglong      :: builtin.u64

 bool           :: builtin.bool

 size_t         :: builtin.uint
 ssize_t        :: builtin.int
-wchar_t        :: builtin.u16 when (ODIN_OS == "windows") else builtin.u32
+wchar_t        :: builtin.u16 when (ODIN_OS == .Windows) else builtin.u32

 float          :: builtin.f32
 double         :: builtin.f64
@@ -46,7 +48,7 @@ int_least64_t  :: builtin.i64
 uint_least64_t :: builtin.u64

 // Same on Windows, Linux, and FreeBSD
-when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
 	int_fast8_t    :: builtin.i8
 	uint_fast8_t   :: builtin.u8
 	int_fast16_t   :: builtin.i32

+ 1 - 1
core/c/frontend/preprocessor/preprocess.odin

@@ -956,7 +956,7 @@ substitute_token :: proc(cpp: ^Preprocessor, tok: ^Token, args: ^Macro_Arg) -> ^
 			continue
 		}

-		if tok.lit == "__VA__OPT__" && tok.next.lit == "(" {
+		if tok.lit == "__VA_OPT__" && tok.next.lit == "(" {
 			opt_arg := read_macro_arg_one(cpp, &tok, tok.next.next, true)
 			if has_varargs(args) {
 				for t := opt_arg.tok; t.kind != .EOF; t = t.next {

+ 2 - 2
core/c/libc/complex.odin

@@ -2,9 +2,9 @@ package libc

 // 7.3 Complex arithmetic

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 2 - 2
core/c/libc/ctype.odin

@@ -1,8 +1,8 @@
 package libc

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 18 - 5
core/c/libc/errno.odin

@@ -2,9 +2,9 @@ package libc

 // 7.5 Errors

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
@@ -14,7 +14,7 @@ when ODIN_OS == "windows" {
 //	EDOM,
 //	EILSEQ
 //	ERANGE
-when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
 	@(private="file")
 	@(default_calling_convention="c")
 	foreign libc {
@@ -27,7 +27,20 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
 	ERANGE :: 34
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .OpenBSD {
+	@(private="file")
+	@(default_calling_convention="c")
+	foreign libc {
+		@(link_name="__errno")
+		_get_errno :: proc() -> ^int ---
+	}
+
+	EDOM   :: 33
+	EILSEQ :: 84
+	ERANGE :: 34
+}
+
+when ODIN_OS == .Windows {
 	@(private="file")
 	@(default_calling_convention="c")
 	foreign libc {
@@ -40,7 +53,7 @@ when ODIN_OS == "windows" {
 	ERANGE :: 34
 }

-when ODIN_OS == "darwin" {
+when ODIN_OS == .Darwin {
 	@(private="file")
 	@(default_calling_convention="c")
 	foreign libc {

+ 2 - 2
core/c/libc/math.odin

@@ -4,9 +4,9 @@ package libc

 import "core:intrinsics"

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 3 - 3
core/c/libc/setjmp.odin

@@ -2,14 +2,14 @@ package libc

 // 7.13 Nonlocal jumps

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	@(default_calling_convention="c")
 	foreign libc {
 		// 7.13.1 Save calling environment

+ 5 - 5
core/c/libc/signal.odin

@@ -2,9 +2,9 @@ package libc

 // 7.14 Signal handling

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
@@ -21,7 +21,7 @@ foreign libc {
 	raise  :: proc(sig: int) -> int ---
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	SIG_ERR :: rawptr(~uintptr(0))
 	SIG_DFL :: rawptr(uintptr(0))
 	SIG_IGN :: rawptr(uintptr(1))
@@ -34,7 +34,7 @@ when ODIN_OS == "windows" {
 	SIGTERM :: 15
 }

-when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
 	SIG_ERR  :: rawptr(~uintptr(0))
 	SIG_DFL  :: rawptr(uintptr(0))
 	SIG_IGN  :: rawptr(uintptr(1))
@@ -47,7 +47,7 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" {
 	SIGTERM  :: 15
 }

-when ODIN_OS == "darwin" {
+when ODIN_OS == .Darwin {
 	SIG_ERR  :: rawptr(~uintptr(0))
 	SIG_DFL  :: rawptr(uintptr(0))
 	SIG_IGN  :: rawptr(uintptr(1))

+ 111 - 146
core/c/libc/stdatomic.odin

@@ -47,29 +47,30 @@ kill_dependency :: #force_inline proc(value: $T) -> T {

 // 7.17.4 Fences
 atomic_thread_fence :: #force_inline proc(order: memory_order) {
-	switch (order) {
-	case .relaxed:
-		return
-	case .consume:
-		intrinsics.atomic_fence_acq()
-	case .acquire:
-		intrinsics.atomic_fence_acq()
-	case .release:
-		intrinsics.atomic_fence_rel()
-	case .acq_rel:
-		intrinsics.atomic_fence_acqrel()
-	case .seq_cst:
-		intrinsics.atomic_fence_acqrel()
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_thread_fence(.Acquire)
+	case .release: intrinsics.atomic_thread_fence(.Release)
+	case .acq_rel: intrinsics.atomic_thread_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_thread_fence(.Seq_Cst)
 	}
 }

 atomic_signal_fence :: #force_inline proc(order: memory_order) {
-	atomic_thread_fence(order)
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_signal_fence(.Acquire)
+	case .release: intrinsics.atomic_signal_fence(.Release)
+	case .acq_rel: intrinsics.atomic_signal_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_signal_fence(.Seq_Cst)
+	}
 }

 // 7.17.5 Lock-free property
 atomic_is_lock_free :: #force_inline proc(obj: ^$T) -> bool {
-	return size_of(T) <= 8 && (intrinsics.type_is_integer(T) || intrinsics.type_is_pointer(T))
+	return intrinsics.atomic_type_is_lock_free(T)
 }

 // 7.17.6 Atomic integer types
@@ -121,13 +122,10 @@ atomic_store_explicit :: #force_inline proc(object: ^$T, desired: T, order: memo
 	assert(order != .acquire)
 	assert(order != .acq_rel)

-	#partial switch (order) {
-	case .relaxed:
-		intrinsics.atomic_store_relaxed(object, desired)
-	case .release:
-		intrinsics.atomic_store_rel(object, desired)
-	case .seq_cst:
-		intrinsics.atomic_store(object, desired)
+	#partial switch order {
+	case .relaxed: intrinsics.atomic_store_explicit(object, desired, .Relaxed)
+	case .release: intrinsics.atomic_store_explicit(object, desired, .Release)
+	case .seq_cst: intrinsics.atomic_store_explicit(object, desired, .Seq_Cst)
 	}
 }

@@ -139,36 +137,26 @@ atomic_load_explicit :: #force_inline proc(object: ^$T, order: memory_order) {
 	assert(order != .release)
 	assert(order != .acq_rel)

-	#partial switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_load_relaxed(object)
-	case .consume:
-		return intrinsics.atomic_load_acq(object)
-	case .acquire:
-		return intrinsics.atomic_load_acq(object)
-	case .seq_cst:
-		return intrinsics.atomic_load(object)
+	#partial switch order {
+	case .relaxed: return intrinsics.atomic_load_explicit(object, .Relaxed)
+	case .consume: return intrinsics.atomic_load_explicit(object, .Consume)
+	case .acquire: return intrinsics.atomic_load_explicit(object, .Acquire)
+	case .seq_cst: return intrinsics.atomic_load_explicit(object, .Seq_Cst)
 	}
 }

 atomic_exchange :: #force_inline proc(object: ^$T, desired: T) -> T {
-	return intrinsics.atomic_xchg(object, desired)
+	return intrinsics.atomic_exchange(object, desired)
 }

 atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xchg_relaxed(object, desired)
-	case .consume:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .acquire:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .release:
-		return intrinsics.atomic_xchg_rel(object, desired)
-	case .acq_rel:
-		return intrinsics.atomic_xchg_acqrel(object, desired)
-	case .seq_cst:
-		return intrinsics.atomic_xchg(object, desired)
+	switch order {
+	case .relaxed: return intrinsics.atomic_exchange_explicit(object, desired, .Relaxed)
+	case .consume: return intrinsics.atomic_exchange_explicit(object, desired, .Consume)
+	case .acquire: return intrinsics.atomic_exchange_explicit(object, desired, .Acquire)
+	case .release: return intrinsics.atomic_exchange_explicit(object, desired, .Release)
+	case .acq_rel: return intrinsics.atomic_exchange_explicit(object, desired, .Acq_Rel)
+	case .seq_cst: return intrinsics.atomic_exchange_explicit(object, desired, .Seq_Cst)
 	}
 	return false
 }
@@ -189,102 +177,104 @@ atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: m
 // 	[success = seq_cst, failure = acquire] => failacq
 // 	[success = acquire, failure = relaxed] => acq_failrelaxed
 // 	[success = acq_rel, failure = relaxed] => acqrel_failrelaxed
-atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_strong(object, expected^, desired)
 	if !ok { expected^ = value }
 	return ok
 }

-atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) {
+atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)

 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchg_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchg_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchg_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Acquire)

 	}
 	if !ok { expected^ = value }
 	return ok
 }

-atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_weak(object, expected^, desired)
 	if !ok { expected^ = value }
 	return ok
 }

-atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desited: T, success, failure: memory_order) {
+atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desited: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)

 	value: T; ok: bool
-	#partial switch (failure) {
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchgweak_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchgweak_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchgweak_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Acquire)

 	}
 	if !ok { expected^ = value }
@@ -297,19 +287,14 @@ atomic_fetch_add :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_add_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_add_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_add_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_add_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_add(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_add_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_add_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_add_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_add_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_add_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_add_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -318,19 +303,14 @@ atomic_fetch_sub :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_sub_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_sub_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_sub_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_sub_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_sub(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_sub_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_sub_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_sub_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_sub_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_sub_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_sub_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -339,19 +319,14 @@ atomic_fetch_or :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_or_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_or_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_or_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_or_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_or(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_or_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_or_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_or_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_or_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_or_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_or_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -360,19 +335,14 @@ atomic_fetch_xor :: #force_inline proc(object: ^$T, operand: T) -> T {
 }

 atomic_fetch_xor_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xor_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_xor_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_xor_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_xor(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_xor_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_xor_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_xor_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_xor_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_xor_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_xor_explicit(object, operand, .Seq_Cst)
 	}
 }

@@ -380,19 +350,14 @@ atomic_fetch_and :: #force_inline proc(object: ^$T, operand: T) -> T {
 	return intrinsics.atomic_and(object, operand)
 }
 atomic_fetch_and_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_and_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_and_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_and_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_and(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_and_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_and_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_and_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_and_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_and_explicit(object, operand, .Acq_Rel)
+	case: fallthrough
+	case .seq_cst: return intrinsics.atomic_and_explicit(object, operand, .Seq_Cst)
 	}
 }

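The rewritten wrappers above forward the C11-style `memory_order` argument to a single `intrinsics.atomic_*_explicit` call per ordering, instead of selecting among per-ordering intrinsics. A minimal usage sketch, assuming the `memory_order` enum and wrapper are used exactly as declared in this hunk:

	package atomic_example

	import "core:c/libc"

	main :: proc() {
		flags: u32
		// Atomically set bit 0 with release ordering; `prev` receives the old value.
		prev := libc.atomic_fetch_or_explicit(&flags, u32(1), libc.memory_order.release)
		assert(prev == 0)
	}
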
+ 56 - 6
core/c/libc/stdio.odin

@@ -1,8 +1,8 @@
 package libc

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
@@ -13,7 +13,7 @@ when ODIN_OS == "windows" {
 FILE :: struct {}

 // MSVCRT compatible.
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	_IOFBF       :: 0x0000
 	_IONBF       :: 0x0004
 	_IOLBF       :: 0x0040
@@ -48,7 +48,7 @@ when ODIN_OS == "windows" {
 }

 // GLIBC and MUSL compatible.
-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	fpos_t        :: struct #raw_union { _: [16]char, _: longlong, _: double, }

 	_IOFBF        :: 0
@@ -78,7 +78,57 @@ when ODIN_OS == "linux" {
 	}
 }

-when ODIN_OS == "darwin" {
+when ODIN_OS == .OpenBSD {
+	fpos_t :: distinct i64
+
+	_IOFBF :: 0
+	_IOLBF :: 1
+	_IONBF :: 1
+
+	BUFSIZ :: 1024
+
+	EOF :: int(-1)
+
+	FOPEN_MAX	:: 20
+	FILENAME_MAX	:: 1024
+
+	SEEK_SET :: 0
+	SEEK_CUR :: 1
+	SEEK_END :: 2
+
+	foreign libc {
+		stderr: ^FILE
+		stdin:  ^FILE
+		stdout: ^FILE
+	}
+}
+
+when ODIN_OS == .FreeBSD {
+	fpos_t :: distinct i64
+
+	_IOFBF :: 0
+	_IOLBF :: 1
+	_IONBF :: 1
+
+	BUFSIZ :: 1024
+
+	EOF :: int(-1)
+
+	FOPEN_MAX	:: 20
+	FILENAME_MAX	:: 1024
+
+	SEEK_SET :: 0
+	SEEK_CUR :: 1
+	SEEK_END :: 2
+
+	foreign libc {
+		stderr: ^FILE
+		stdin:  ^FILE
+		stdout: ^FILE
+	}
+}
+
+when ODIN_OS == .Darwin {
 	fpos_t :: distinct i64
 	
 	_IOFBF        :: 0
@@ -149,7 +199,7 @@ foreign libc {
 	putchar   :: proc() -> int ---
 	puts      :: proc(s: cstring) -> int ---
 	ungetc    :: proc(c: int, stream: ^FILE) -> int ---
-	fread     :: proc(ptr: rawptr, size: size_t, stream: ^FILE) -> size_t ---
+	fread     :: proc(ptr: rawptr, size: size_t, nmemb: size_t, stream: ^FILE) -> size_t ---
 	fwrite    :: proc(ptr: rawptr, size: size_t, nmemb: size_t, stream: ^FILE) -> size_t ---

 	// 7.21.9 File positioning functions

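The `fread` binding gains the previously missing `nmemb` parameter, matching C's four-argument signature. A hedged sketch of reading through these bindings; `fopen` and `fclose` are assumed to be bound elsewhere in the same file:

	package stdio_example

	import "core:c/libc"

	read_header :: proc(path: cstring) -> (header: [16]u8, ok: bool) {
		f := libc.fopen(path, "rb")
		if f == nil {
			return
		}
		defer libc.fclose(f)

		// size = 1 byte per element, nmemb = 16 elements.
		n := libc.fread(&header[0], 1, size_of(header), f)
		return header, n == size_of(header)
	}
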
+ 5 - 5
core/c/libc/stdlib.odin

@@ -2,15 +2,15 @@ package libc

 // 7.22 General utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	RAND_MAX :: 0x7fff

 	@(private="file")
@@ -24,7 +24,7 @@ when ODIN_OS == "windows" {
 	}
 }

-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	RAND_MAX :: 0x7fffffff

 	// GLIBC and MUSL only
@@ -40,7 +40,7 @@ when ODIN_OS == "linux" {
 }


-when ODIN_OS == "darwin" {
+when ODIN_OS == .Darwin {
 	RAND_MAX :: 0x7fffffff

 	// GLIBC and MUSL only

+ 2 - 2
core/c/libc/string.odin

@@ -4,9 +4,9 @@ import "core:runtime"

 // 7.24 String handling

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 3 - 3
core/c/libc/threads.odin

@@ -5,7 +5,7 @@ package libc
 thrd_start_t :: proc "c" (rawptr) -> int
 tss_dtor_t   :: proc "c" (rawptr)

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc {
 		"system:libucrt.lib", 
 		"system:msvcprt.lib"
@@ -74,7 +74,7 @@ when ODIN_OS == "windows" {
 }

 // GLIBC and MUSL compatible constants and types.
-when ODIN_OS == "linux" {
+when ODIN_OS == .Linux {
 	foreign import libc {
 		"system:c",
 		"system:pthread"
@@ -138,6 +138,6 @@ when ODIN_OS == "linux" {
 }


-when ODIN_OS == "darwin" {
+when ODIN_OS == .Darwin {
 	// TODO: find out what this is meant to be!
 }

+ 10 - 5
core/c/libc/time.odin

@@ -2,9 +2,9 @@ package libc

 // 7.27 Date and time

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
@@ -12,7 +12,7 @@ when ODIN_OS == "windows" {

 // We enforce 64-bit time_t and timespec as there is no reason to use 32-bit as
 // we approach the 2038 problem. Windows has defaulted to this since VC8 (2005).
-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign libc {
 		// 7.27.2 Time manipulation functions
 		                               clock        :: proc() -> clock_t ---
@@ -45,7 +45,7 @@ when ODIN_OS == "windows" {
 	}
 }

-when ODIN_OS == "linux" || ODIN_OS == "freebsd" || ODIN_OS == "darwin" {
+when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Darwin || ODIN_OS == .OpenBSD {
 	@(default_calling_convention="c")
 	foreign libc {
 		// 7.27.2 Time manipulation functions
@@ -63,7 +63,12 @@ when ODIN_OS == "linux" || ODIN_OS == "freebsd" || ODIN_OS == "darwin" {
 		strftime     :: proc(s: [^]char, maxsize: size_t, format: cstring, timeptr: ^tm) -> size_t ---
 	}

-	CLOCKS_PER_SEC :: 1000000
+	when ODIN_OS == .OpenBSD {
+		CLOCKS_PER_SEC :: 100
+	} else {
+		CLOCKS_PER_SEC :: 1000000
+	}
+
 	TIME_UTC       :: 1

 	time_t         :: distinct i64

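With this change `CLOCKS_PER_SEC` is 100 on OpenBSD and 1_000_000 on the other supported Unixes, so callers should always scale by the constant rather than assuming microsecond ticks. A small hedged sketch:

	package time_example

	import "core:c/libc"

	// Converts a clock() delta to seconds; correct on OpenBSD (100 ticks/s)
	// as well as on Linux/FreeBSD/Darwin (1_000_000 ticks/s).
	clock_seconds :: proc(start, end: libc.clock_t) -> f64 {
		return f64(end - start) / f64(libc.CLOCKS_PER_SEC)
	}
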
+ 2 - 0
core/c/libc/types.odin

@@ -3,6 +3,8 @@ package libc
 import "core:c"

 char           :: c.char // assuming -funsigned-char
+
+schar          :: c.schar
 short          :: c.short
 int            :: c.int
 long           :: c.long

+ 2 - 2
core/c/libc/uchar.odin

@@ -2,9 +2,9 @@ package libc

 // 7.28 Unicode utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 2 - 2
core/c/libc/wchar.odin

@@ -2,9 +2,9 @@ package libc

 // 7.29 Extended multibyte and wide character utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"

+ 14 - 7
core/c/libc/wctype.odin

@@ -2,27 +2,34 @@ package libc

 // 7.30 Wide character classification and mapping utilities

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	foreign import libc "system:libucrt.lib"
-} else when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	foreign import libc "system:System.framework"
 } else {
 	foreign import libc "system:c"
 }

-when ODIN_OS == "windows" {
+when ODIN_OS == .Windows {
 	wctrans_t :: distinct wchar_t
 	wctype_t  :: distinct ushort
-}

-when ODIN_OS == "linux" {
+} else when ODIN_OS == .Linux {
 	wctrans_t :: distinct intptr_t
 	wctype_t  :: distinct ulong
-}

-when ODIN_OS == "darwin" {
+} else when ODIN_OS == .Darwin {
 	wctrans_t :: distinct int
 	wctype_t  :: distinct u32
+
+} else when ODIN_OS == .OpenBSD {
+	wctrans_t :: distinct rawptr
+	wctype_t  :: distinct rawptr
+
+} else when ODIN_OS == .FreeBSD {
+	wctrans_t :: distinct int
+	wctype_t  :: distinct ulong
+	
 }

 @(default_calling_convention="c")

+ 16 - 6
core/compress/common.odin

@@ -5,6 +5,9 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation, optimization.
 */
+
+
+// package compress is a collection of utilities to aid with other compression packages
 package compress

 import "core:io"
@@ -44,7 +47,7 @@ when size_of(uintptr) == 8 {
 }


-Error :: union {
+Error :: union #shared_nil {
 	General_Error,
 	Deflate_Error,
 	ZLIB_Error,
@@ -55,6 +58,7 @@ Error :: union {
 }

 General_Error :: enum {
+	None = 0,
 	File_Not_Found,
 	Cannot_Open_File,
 	File_Too_Short,
@@ -73,6 +77,7 @@ General_Error :: enum {
 }

 GZIP_Error :: enum {
+	None = 0,
 	Invalid_GZIP_Signature,
 	Reserved_Flag_Set,
 	Invalid_Extra_Data,
@@ -97,6 +102,7 @@ GZIP_Error :: enum {
 }

 ZIP_Error :: enum {
+	None = 0,
 	Invalid_ZIP_File_Signature,
 	Unexpected_Signature,
 	Insert_Next_Disk,
@@ -104,6 +110,7 @@ ZIP_Error :: enum {
 }

 ZLIB_Error :: enum {
+	None = 0,
 	Unsupported_Window_Size,
 	FDICT_Unsupported,
 	Unsupported_Compression_Level,
@@ -111,6 +118,7 @@ ZLIB_Error :: enum {
 }

 Deflate_Error :: enum {
+	None = 0,
 	Huffman_Bad_Sizes,
 	Huffman_Bad_Code_Lengths,
 	Inflate_Error,
@@ -120,7 +128,6 @@ Deflate_Error :: enum {
 	BType_3,
 }

-
 // General I/O context for ZLIB, LZW, etc.
 Context_Memory_Input :: struct #packed {
 	input_data:        []u8,
@@ -136,7 +143,12 @@ Context_Memory_Input :: struct #packed {
 	size_packed:       i64,
 	size_unpacked:     i64,
 }
-#assert(size_of(Context_Memory_Input) == 64)
+when size_of(rawptr) == 8 {
+	#assert(size_of(Context_Memory_Input) == 64)
+} else {
+	// e.g. `-target:windows_i386`
+	#assert(size_of(Context_Memory_Input) == 52)
+}

 Context_Stream_Input :: struct #packed {
 	input_data:        []u8,
@@ -171,8 +183,6 @@ Context_Stream_Input :: struct #packed {
 	This simplifies end-of-stream handling where bits may be left in the bit buffer.
 */

-// TODO: Make these return compress.Error errors.
-
 input_size_from_memory :: proc(z: ^Context_Memory_Input) -> (res: i64, err: Error) {
 	return i64(len(z.input_data)), nil
 }
@@ -470,4 +480,4 @@ discard_to_next_byte_lsb_from_stream :: proc(z: ^Context_Stream_Input) {
 	consume_bits_lsb(z, discard)
 }

-discard_to_next_byte_lsb :: proc{discard_to_next_byte_lsb_from_memory, discard_to_next_byte_lsb_from_stream};
+discard_to_next_byte_lsb :: proc{discard_to_next_byte_lsb_from_memory, discard_to_next_byte_lsb_from_stream}

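Because `Error` is now a `#shared_nil` union and each variant enum carries an explicit `None = 0`, the zero value of any variant collapses to the union's shared `nil`; a single `err == nil` (or `or_return`) check therefore covers every sub-error enum. A hedged illustration:

	package compress_example

	import "core:compress"

	succeeded :: proc(err: compress.Error) -> bool {
		// A General_Error.None, ZLIB_Error.None, etc. stored in the union
		// all compare equal to nil under #shared_nil.
		return err == nil
	}
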
+ 4 - 3
core/compress/gzip/gzip.odin

@@ -66,7 +66,8 @@ OS :: enum u8 {
 	_Unknown     = 14,
 	Unknown      = 255,
 }
-OS_Name :: #partial [OS]string{
+OS_Name :: #sparse[OS]string{
+	._Unknown     = "",
 	.FAT          = "FAT",
 	.Amiga        = "Amiga",
 	.VMS          = "VMS/OpenVMS",
@@ -99,7 +100,7 @@ E_GZIP    :: compress.GZIP_Error
 E_ZLIB    :: compress.ZLIB_Error
 E_Deflate :: compress.Deflate_Error

-GZIP_MAX_PAYLOAD_SIZE :: int(max(u32le))
+GZIP_MAX_PAYLOAD_SIZE :: i64(max(u32le))

 load :: proc{load_from_slice, load_from_file, load_from_context}

@@ -135,7 +136,7 @@ load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, exp

 	z.output = buf

-	if expected_output_size > GZIP_MAX_PAYLOAD_SIZE {
+	if i64(expected_output_size) > i64(GZIP_MAX_PAYLOAD_SIZE) {
 		return E_GZIP.Payload_Size_Exceeds_Max_Payload
 	}


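For context, the `load` overload group above decompresses into a `bytes.Buffer`; a hedged sketch of the slice path, assuming `load_from_slice` takes the input slice and buffer in the same order as `load_from_context` shown in the last hunk:

	package gzip_example

	import "core:bytes"
	import "core:compress/gzip"

	gunzip :: proc(data: []u8) -> (out: []u8, ok: bool) {
		buf: bytes.Buffer
		// load resolves to load_from_slice here (assumed signature: input slice, ^bytes.Buffer, ...).
		if err := gzip.load(data, &buf); err != nil {
			bytes.buffer_destroy(&buf)
			return nil, false
		}
		return bytes.buffer_to_bytes(&buf), true
	}
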
+ 148 - 0
core/compress/shoco/model.odin

@@ -0,0 +1,148 @@
+/*
+	This file was generated, so don't edit this by hand.
+	Transliterated from https://github.com/Ed-von-Schleck/shoco/blob/master/shoco_model.h,
+	which is an English word model.
+*/
+
+// package shoco is an implementation of the shoco short string compressor
+package shoco
+
+DEFAULT_MODEL :: Shoco_Model {
+	min_char = 39,
+	max_char = 122,
+	characters_by_id = {
+		'e', 'a', 'i', 'o', 't', 'h', 'n', 'r', 's', 'l', 'u', 'c', 'w', 'm', 'd', 'b', 'p', 'f', 'g', 'v', 'y', 'k', '-', 'H', 'M', 'T', '\'', 'B', 'x', 'I', 'W', 'L',
+	},
+	ids_by_character = {
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 27, -1, -1, -1, -1, -1, 23, 29, -1, -1, 31, 24, -1, -1, -1, -1, -1, -1, 25, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 15, 11, 14, 0, 17, 18, 5, 2, -1, 21, 9, 13, 6, 3, 16, -1, 7, 8, 4, 10, 19, 12, 28, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+	},
+	successors_by_bigram = {
+		7, 4, 12, -1, 6, -1, 1, 0, 3, 5, -1, 9, -1, 8, 2, -1, 15, 14, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1,
+		1, -1, 6, -1, 1, -1, 0, 3, 2, 4, 15, 11, -1, 9, 5, 10, 13, -1, 12, 8, 7, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		9, 11, -1, 4, 2, -1, 0, 8, 1, 5, -1, 6, -1, 3, 7, 15, -1, 12, 10, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		-1, -1, 14, 7, 5, -1, 1, 2, 8, 9, 0, 15, 6, 4, 11, -1, 12, 3, -1, 10, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		2, 4, 3, 1, 5, 0, -1, 6, 10, 9, 7, 12, 11, -1, -1, -1, -1, 13, -1, -1, 8, -1, 15, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		0, 1, 2, 3, 4, -1, -1, 5, 9, 10, 6, -1, -1, 8, 15, 11, -1, 14, -1, -1, 7, -1, 13, -1, -1, -1, 12, -1, -1, -1, -1, -1,
+		2, 8, 7, 4, 3, -1, 9, -1, 6, 11, -1, 5, -1, -1, 0, -1, -1, 14, 1, 15, 10, 12, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1,
+		0, 3, 1, 2, 6, -1, 9, 8, 4, 12, 13, 10, -1, 11, 7, -1, -1, 15, 14, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 6, 3, 4, 1, 2, -1, -1, 5, 10, 7, 9, 11, 12, -1, -1, 8, 14, -1, -1, 15, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 6, 2, 5, 9, -1, -1, -1, 10, 1, 8, -1, 12, 14, 4, -1, 15, 7, -1, 13, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		8, 10, 9, 15, 1, -1, 4, 0, 3, 2, -1, 6, -1, 12, 11, 13, 7, 14, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		1, 3, 6, 0, 4, 2, -1, 7, 13, 8, 9, 11, -1, -1, 15, -1, -1, -1, -1, -1, 10, 5, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		3, 0, 1, 4, -1, 2, 5, 6, 7, 8, -1, 14, -1, -1, 9, 15, -1, 12, -1, -1, -1, 10, 11, -1, -1, -1, 13, -1, -1, -1, -1, -1,
+		0, 1, 3, 2, 15, -1, 12, -1, 7, 14, 4, -1, -1, 9, -1, 8, 5, 10, -1, -1, 6, -1, 13, -1, -1, -1, 11, -1, -1, -1, -1, -1,
+		0, 3, 1, 2, -1, -1, 12, 6, 4, 9, 7, -1, -1, 14, 8, -1, -1, 15, 11, 13, 5, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 5, 7, 2, 10, 13, -1, 6, 8, 1, 3, -1, -1, 14, 15, 11, -1, -1, -1, 12, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 2, 6, 3, 7, 10, -1, 1, 9, 4, 8, -1, -1, 15, -1, 12, 5, -1, -1, -1, 11, -1, 13, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		1, 3, 4, 0, 7, -1, 12, 2, 11, 8, 6, 13, -1, -1, -1, -1, -1, 5, -1, -1, 10, 15, 9, -1, -1, -1, 14, -1, -1, -1, -1, -1,
+		1, 3, 5, 2, 13, 0, 9, 4, 7, 6, 8, -1, -1, 15, -1, 11, -1, -1, 10, -1, 14, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 2, 1, 3, -1, -1, -1, 6, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		1, 11, 4, 0, 3, -1, 13, 12, 2, 7, -1, -1, 15, 10, 5, 8, 14, -1, -1, -1, -1, -1, 9, -1, -1, -1, 6, -1, -1, -1, -1, -1,
+		0, 9, 2, 14, 15, 4, 1, 13, 3, 5, -1, -1, 10, -1, -1, -1, -1, 6, 12, -1, 7, -1, 8, -1, -1, -1, 11, -1, -1, -1, -1, -1,
+		-1, 2, 14, -1, 1, 5, 8, 7, 4, 12, -1, 6, 9, 11, 13, 3, 10, 15, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		0, 1, 3, 2, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		4, 3, 1, 5, -1, -1, -1, 0, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		2, 8, 4, 1, -1, 0, -1, 6, -1, -1, 5, -1, 7, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, 9, -1, -1, -1, -1, -1, -1, -1, -1,
+		12, 5, -1, -1, 1, -1, -1, 7, 0, 3, -1, 2, -1, 4, 6, -1, -1, -1, -1, 8, -1, -1, 15, -1, 13, 9, -1, -1, -1, -1, -1, 11,
+		1, 3, 2, 4, -1, -1, -1, 5, -1, 7, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1,
+		5, 3, 4, 12, 1, 6, -1, -1, -1, -1, 8, 2, -1, -1, -1, -1, 0, 9, -1, -1, 11, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		-1, -1, -1, -1, 0, -1, 1, 12, 3, -1, -1, -1, -1, 5, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, 6, -1, 10,
+		2, 3, 1, 4, -1, 0, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1,
+		5, 1, 3, 0, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, 9, -1, -1, 6, -1, 7,
+	},
+	successors_reversed = {
+		's', 't', 'c', 'l', 'm', 'a', 'd', 'r', 'v', 'T', 'A', 'L', 'e', 'M', 'Y', '-',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'-', 't', 'a', 'b', 's', 'h', 'c', 'r', 'n', 'w', 'p', 'm', 'l', 'd', 'i', 'f',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'u', 'e', 'i', 'a', 'o', 'r', 'y', 'l', 'I', 'E', 'R', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'a', 'o', 'i', 'u', 'A', 'y', 'E', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		't', 'n', 'f', 's', '\'', 'm', 'I', 'N', 'A', 'E', 'L', 'Z', 'r', 'V', 'R', 'C',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'o', 'a', 'y', 'i', 'u', 'e', 'I', 'L', 'D', '\'', 'E', 'Y', '\x00', '\x00', '\x00', '\x00',
+		'r', 'i', 'y', 'a', 'e', 'o', 'u', 'Y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'h', 'o', 'e', 'E', 'i', 'u', 'r', 'w', 'a', 'H', 'y', 'R', 'Z', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'h', 'i', 'e', 'a', 'o', 'r', 'I', 'y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'n', 't', 's', 'r', 'l', 'd', 'i', 'y', 'v', 'm', 'b', 'c', 'g', 'p', 'k', 'u',
+		'e', 'l', 'o', 'u', 'y', 'a', 'r', 'i', 's', 'j', 't', 'b', 'v', 'h', 'm', 'd',
+		'o', 'e', 'h', 'a', 't', 'k', 'i', 'r', 'l', 'u', 'y', 'c', 'q', 's', '-', 'd',
+		'e', 'i', 'o', 'a', 's', 'y', 'r', 'u', 'd', 'l', '-', 'g', 'n', 'v', 'm', 'f',
+		'r', 'n', 'd', 's', 'a', 'l', 't', 'e', 'm', 'c', 'v', 'y', 'i', 'x', 'f', 'p',
+		'o', 'e', 'r', 'a', 'i', 'f', 'u', 't', 'l', '-', 'y', 's', 'n', 'c', '\'', 'k',
+		'h', 'e', 'o', 'a', 'r', 'i', 'l', 's', 'u', 'n', 'g', 'b', '-', 't', 'y', 'm',
+		'e', 'a', 'i', 'o', 't', 'r', 'u', 'y', 'm', 's', 'l', 'b', '\'', '-', 'f', 'd',
+		'n', 's', 't', 'm', 'o', 'l', 'c', 'd', 'r', 'e', 'g', 'a', 'f', 'v', 'z', 'b',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'n', 'i', 's', 'h', 'l', 'f', 'y', '-', 'a', 'w', '\'', 'g', 'r', 'o', 't',
+		'e', 'l', 'i', 'y', 'd', 'o', 'a', 'f', 'u', 't', 's', 'k', 'w', 'v', 'm', 'p',
+		'e', 'a', 'o', 'i', 'u', 'p', 'y', 's', 'b', 'm', 'f', '\'', 'n', '-', 'l', 't',
+		'd', 'g', 'e', 't', 'o', 'c', 's', 'i', 'a', 'n', 'y', 'l', 'k', '\'', 'f', 'v',
+		'u', 'n', 'r', 'f', 'm', 't', 'w', 'o', 's', 'l', 'v', 'd', 'p', 'k', 'i', 'c',
+		'e', 'r', 'a', 'o', 'l', 'p', 'i', 't', 'u', 's', 'h', 'y', 'b', '-', '\'', 'm',
+		'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'e', 'i', 'o', 'a', 's', 'y', 't', 'd', 'r', 'n', 'c', 'm', 'l', 'u', 'g', 'f',
+		'e', 't', 'h', 'i', 'o', 's', 'a', 'u', 'p', 'c', 'l', 'w', 'm', 'k', 'f', 'y',
+		'h', 'o', 'e', 'i', 'a', 't', 'r', 'u', 'y', 'l', 's', 'w', 'c', 'f', '\'', '-',
+		'r', 't', 'l', 's', 'n', 'g', 'c', 'p', 'e', 'i', 'a', 'd', 'm', 'b', 'f', 'o',
+		'e', 'i', 'a', 'o', 'y', 'u', 'r', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
+		'a', 'i', 'h', 'e', 'o', 'n', 'r', 's', 'l', 'd', 'k', '-', 'f', '\'', 'c', 'b',
+		'p', 't', 'c', 'a', 'i', 'e', 'h', 'q', 'u', 'f', '-', 'y', 'o', '\x00', '\x00', '\x00',
+		'o', 'e', 's', 't', 'i', 'd', '\'', 'l', 'b', '-', 'm', 'a', 'r', 'n', 'p', 'w',
+	},
+
+	character_count = 32,
+	successor_count = 16,
+
+	max_successor_n = 7,
+	packs = {
+		{ 0x80000000, 1, 2, { 26, 24, 24, 24, 24, 24, 24, 24 }, { 15,  3,  0,  0, 0, 0, 0, 0 }, 0xc0, 0x80 },
+		{ 0xc0000000, 2, 4, { 25, 22, 19, 16, 16, 16, 16, 16 }, { 15,  7,  7,  7, 0, 0, 0, 0 }, 0xe0, 0xc0 },
+		{ 0xe0000000, 4, 8, { 23, 19, 15, 11,  8,  5,  2,  0 }, { 31, 15, 15, 15, 7, 7, 7, 3 }, 0xf0, 0xe0 },
+	},
+}

+ 318 - 0
core/compress/shoco/shoco.odin

@@ -0,0 +1,318 @@
+/*
+	Copyright 2022 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+
+	An implementation of [shoco](https://github.com/Ed-von-Schleck/shoco) by Christian Schramm.
+*/
+
+// package shoco is an implementation of the shoco short string compressor
+package shoco
+
+import "core:intrinsics"
+import "core:compress"
+
+Shoco_Pack :: struct {
+	word:           u32,
+	bytes_packed:   i8,
+	bytes_unpacked: i8,
+	offsets:        [8]u16,
+	masks:          [8]i16,
+	header_mask:    u8,
+	header:         u8,
+}
+
+Shoco_Model :: struct {
+	min_char:             u8,
+	max_char:             u8,
+	characters_by_id:     []u8,
+	ids_by_character:     [256]i16,
+	successors_by_bigram: []i8,
+	successors_reversed:  []u8,
+
+	character_count:      u8,
+	successor_count:      u8,
+	max_successor_n:      i8,
+	packs:                []Shoco_Pack,
+}
+
+compress_bound :: proc(uncompressed_size: int) -> (worst_case_compressed_size: int) {
+	// Worst case compression happens when input is non-ASCII (128-255)
+	// Encoded as 0x00 + the byte in question.
+	return uncompressed_size * 2
+}
+
+decompress_bound :: proc(compressed_size: int, model := DEFAULT_MODEL) -> (maximum_decompressed_size: int) {
+	// Best case compression is 2:1
+	most: f64
+	for pack in model.packs {
+		val := f64(compressed_size) / f64(pack.bytes_packed) * f64(pack.bytes_unpacked)
+		most = max(most, val)
+	}
+	return int(most)
+}
+
+find_best_encoding :: proc(indices: []i16, n_consecutive: i8, model := DEFAULT_MODEL) -> (res: int) {
+	for p := len(model.packs); p > 0; p -= 1 {
+		pack := model.packs[p - 1]
+		if n_consecutive >= pack.bytes_unpacked {
+			have_index := true
+			for i := 0; i < int(pack.bytes_unpacked); i += 1 {
+				if indices[i] > pack.masks[i] {
+					have_index = false
+					break
+				}
+			}
+			if have_index {
+				return p - 1
+			}
+		}
+	}
+	return -1
+}
+
+validate_model :: proc(model: Shoco_Model) -> (int, compress.Error) {
+	if len(model.characters_by_id) != int(model.character_count) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	if len(model.successors_by_bigram) != int(model.character_count) * int(model.character_count) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	if len(model.successors_reversed) != int(model.successor_count) * int(model.max_char - model.min_char) {
+		return 0, .Unknown_Compression_Method
+	}
+
+	// Model seems legit.
+	return 0, nil
+}
+
+// Decompresses into provided buffer.
+decompress_slice_to_output_buffer :: proc(input: []u8, output: []u8, model := DEFAULT_MODEL) -> (size: int, err: compress.Error) {
+	inp, inp_end := 0, len(input)
+	out, out_end := 0, len(output)
+
+	validate_model(model) or_return
+
+	for inp < inp_end {
+		val  := transmute(i8)input[inp]
+		mark := int(-1)
+
+		for val < 0 {
+			val <<= 1
+			mark += 1
+		}
+
+		if mark > len(model.packs) {
+			return out, .Unknown_Compression_Method
+		}
+
+		if mark < 0 {
+			if out >= out_end {
+				return out, .Output_Too_Short
+			}
+
+			// Ignore the sentinel value for non-ASCII chars
+			if input[inp] == 0x00 {
+				inp += 1
+				if inp >= inp_end {
+					return out, .Stream_Too_Short
+				}
+			}
+			output[out] = input[inp]
+			inp, out = inp + 1, out + 1
+
+		} else {
+			pack := model.packs[mark]
+
+			if out + int(pack.bytes_unpacked) > out_end {
+				return out, .Output_Too_Short
+			} else if inp + int(pack.bytes_packed) > inp_end {
+				return out, .Stream_Too_Short
+			}
+
+			code := intrinsics.unaligned_load((^u32)(&input[inp]))
+			when ODIN_ENDIAN == .Little {
+				code = intrinsics.byte_swap(code)
+			}
+
+			// Unpack the leading char
+			offset := pack.offsets[0]
+			mask   := pack.masks[0]
+
+			last_chr := model.characters_by_id[(code >> offset) & u32(mask)]
+			output[out] = last_chr
+
+			// Unpack the successor chars
+			for i := 1; i < int(pack.bytes_unpacked); i += 1 {
+				offset = pack.offsets[i]
+				mask   = pack.masks[i]
+
+				index_major := u32(last_chr - model.min_char) * u32(model.successor_count)
+				index_minor := (code >> offset) & u32(mask)
+
+				last_chr = model.successors_reversed[index_major + index_minor]
+
+				output[out + i] = last_chr
+			}
+
+			out += int(pack.bytes_unpacked)
+			inp += int(pack.bytes_packed)
+		}
+	}
+
+	return out, nil
+}
+
+decompress_slice_to_string :: proc(input: []u8, model := DEFAULT_MODEL, allocator := context.allocator) -> (res: string, err: compress.Error) {
+	context.allocator = allocator
+
+	if len(input) == 0 {
+		return "", .Stream_Too_Short
+	}
+
+	max_output_size := decompress_bound(len(input), model)
+
+	buf: [dynamic]u8
+	if !resize(&buf, max_output_size) {
+		return "", .Out_Of_Memory
+	}
+
+	length, result := decompress_slice_to_output_buffer(input, buf[:])
+	resize(&buf, length)
+	return string(buf[:]), result
+}
+decompress :: proc{decompress_slice_to_output_buffer, decompress_slice_to_string}
+
+compress_string_to_buffer :: proc(input: string, output: []u8, model := DEFAULT_MODEL, allocator := context.allocator) -> (size: int, err: compress.Error) {
+	inp, inp_end := 0, len(input)
+	out, out_end := 0, len(output)
+	output := output
+
+	validate_model(model) or_return
+
+	indices := make([]i16, model.max_successor_n + 1)
+	defer delete(indices)
+
+	last_resort := false
+
+	encode: for inp < inp_end {
+		if last_resort {
+			last_resort = false
+
+			if input[inp] & 0x80 == 0x80 {
+				// Non-ASCII case
+				if out + 2 > out_end {
+					return out, .Output_Too_Short
+				}
+
+				// Put in a sentinel byte
+				output[out] = 0x00
+				out += 1
+			} else {
+				// An ASCII byte
+				if out + 1 > out_end {
+					return out, .Output_Too_Short
+				}
+			}
+			output[out] = input[inp]
+			out, inp = out + 1, inp + 1
+		} else {
+			// Find the longest string of known successors
+			indices[0] = model.ids_by_character[input[inp]]
+			last_chr_index := indices[0]
+
+			if last_chr_index < 0 {
+				last_resort = true
+				continue encode
+			}
+
+			rest := inp_end - inp
+			n_consecutive: i8 = 1
+			for ; n_consecutive <= model.max_successor_n; n_consecutive += 1 {
+				if inp_end > 0 && int(n_consecutive) == rest {
+					break
+				}
+
+				current_index := model.ids_by_character[input[inp + int(n_consecutive)]]
+				if current_index < 0 { // '\0' is always -1
+					break
+				}
+
+				successor_index := model.successors_by_bigram[last_chr_index * i16(model.character_count) + current_index]
+				if successor_index < 0 {
+					break
+				}
+
+				indices[n_consecutive] = i16(successor_index)
+				last_chr_index = current_index
+			}
+
+			if n_consecutive < 2 {
+				last_resort = true
+				continue encode
+			}
+
+			pack_n := find_best_encoding(indices, n_consecutive)
+			if pack_n >= 0 {
+				if out + int(model.packs[pack_n].bytes_packed) > out_end {
+					return out, .Output_Too_Short
+				}
+
+				pack := model.packs[pack_n]
+				code := pack.word
+
+				for i := 0; i < int(pack.bytes_unpacked); i += 1 {
+					code |= u32(indices[i]) << pack.offsets[i]
+				}
+
+				// In the little-endian world, we need to swap what's in the register to match the memory representation.
+				when ODIN_ENDIAN == .Little {
+					code = intrinsics.byte_swap(code)
+				}
+				out_ptr := raw_data(output[out:])
+
+				switch pack.bytes_packed {
+				case 4:
+					intrinsics.unaligned_store(transmute(^u32)out_ptr, code)
+				case 2:
+					intrinsics.unaligned_store(transmute(^u16)out_ptr, u16(code))
+				case 1:
+					intrinsics.unaligned_store(transmute(^u8)out_ptr,  u8(code))
+				case:
+					return out, .Unknown_Compression_Method
+				}
+
+				out += int(pack.bytes_packed)
+				inp += int(pack.bytes_unpacked)
+			} else {
+				last_resort = true
+				continue encode
+			}
+		}
+	}
+	return out, nil
+}
+
+compress_string :: proc(input: string, model := DEFAULT_MODEL, allocator := context.allocator) -> (output: []u8, err: compress.Error) {
+	context.allocator = allocator
+
+	if len(input) == 0 {
+		return {}, .Stream_Too_Short
+	}
+
+	max_output_size := compress_bound(len(input))
+
+	buf: [dynamic]u8
+	if !resize(&buf, max_output_size) {
+		return {}, .Out_Of_Memory
+	}
+
+	length, result := compress_string_to_buffer(input, buf[:])
+	resize(&buf, length)
+	return buf[:length], result
+}
+compress :: proc{compress_string_to_buffer, compress_string}

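A short round-trip of the package as declared above: `compress_string` allocates and returns the packed bytes, and `decompress_slice_to_string` allocates the recovered text; both default to `DEFAULT_MODEL`.

	package shoco_example

	import "core:fmt"
	import "core:compress/shoco"

	main :: proc() {
		packed, c_err := shoco.compress_string("this is a test")
		if c_err != nil {
			return
		}
		defer delete(packed)

		text, d_err := shoco.decompress_slice_to_string(packed)
		if d_err != nil {
			return
		}
		defer delete(text)

		fmt.println(text) // prints: this is a test
	}
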
+ 43 - 40
core/compress/zlib/zlib.odin

@@ -47,10 +47,10 @@ Options :: struct {
 	level: u8,
 }

-Error     :: compress.Error
-E_General :: compress.General_Error
-E_ZLIB    :: compress.ZLIB_Error
-E_Deflate :: compress.Deflate_Error
+Error         :: compress.Error
+General_Error :: compress.General_Error
+ZLIB_Error    :: compress.ZLIB_Error
+Deflate_Error :: compress.Deflate_Error

 DEFLATE_MAX_CHUNK_SIZE   :: 65535
 DEFLATE_MAX_LITERAL_SIZE :: 65535
@@ -111,9 +111,9 @@ ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)
 */
 Huffman_Table :: struct {
 	fast:        [1 << ZFAST_BITS]u16,
-	firstcode:   [16]u16,
+	firstcode:   [17]u16,
 	maxcode:     [17]int,
-	firstsymbol: [16]u16,
+	firstsymbol: [17]u16,
 	size:        [288]u8,
 	value:       [288]u16,
 }
@@ -244,7 +244,7 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T
 @(optimization_mode="speed")
 @(optimization_mode="speed")
 build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	sizes:     [HUFFMAN_MAX_BITS+1]int
 	sizes:     [HUFFMAN_MAX_BITS+1]int
-	next_code: [HUFFMAN_MAX_BITS]int
+	next_code: [HUFFMAN_MAX_BITS+1]int
 
 
 	k := int(0)
 	k := int(0)
 
 
@@ -256,21 +256,21 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
 	}
 	}
 	sizes[0] = 0
 	sizes[0] = 0
 
 
-	for i in 1..<(HUFFMAN_MAX_BITS+1) {
+	for i in 1 ..< HUFFMAN_MAX_BITS {
 		if sizes[i] > (1 << uint(i)) {
 		if sizes[i] > (1 << uint(i)) {
-			return E_Deflate.Huffman_Bad_Sizes
+			return .Huffman_Bad_Sizes
 		}
 		}
 	}
 	}
 	code := int(0)
 	code := int(0)
 
 
-	for i in 1..<HUFFMAN_MAX_BITS {
+	for i in 1 ..= HUFFMAN_MAX_BITS {
 		next_code[i]     = code
 		next_code[i]     = code
 		z.firstcode[i]   = u16(code)
 		z.firstcode[i]   = u16(code)
 		z.firstsymbol[i] = u16(k)
 		z.firstsymbol[i] = u16(k)
 		code = code + sizes[i]
 		code = code + sizes[i]
 		if sizes[i] != 0 {
 		if sizes[i] != 0 {
 			if code - 1 >= (1 << u16(i)) {
 			if code - 1 >= (1 << u16(i)) {
-				return E_Deflate.Huffman_Bad_Code_Lengths
+				return .Huffman_Bad_Code_Lengths
 			}
 			}
 		}
 		}
 		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
 		z.maxcode[i] = code << (HUFFMAN_MAX_BITS - uint(i))
@@ -314,15 +314,15 @@ decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Erro
 		s += 1
 		s += 1
 	}
 	}
 	if s >= 16 {
 	if s >= 16 {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 	// code size is s, so:
 	// code size is s, so:
 	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
 	b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s])
 	if b >= size_of(t.size) {
 	if b >= size_of(t.size) {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 	if t.size[b] != s {
 	if t.size[b] != s {
-		return 0, E_Deflate.Bad_Huffman_Code
+		return 0, .Bad_Huffman_Code
 	}
 	}
 
 
 	compress.consume_bits_lsb(z, s)
 	compress.consume_bits_lsb(z, s)
@@ -335,11 +335,11 @@ decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Erro
 decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
 	if z.num_bits < 16 {
 	if z.num_bits < 16 {
 		if z.num_bits > 63 {
 		if z.num_bits > 63 {
-			return 0, E_ZLIB.Code_Buffer_Malformed
+			return 0, .Code_Buffer_Malformed
 		}
 		}
 		compress.refill_lsb(z)
 		compress.refill_lsb(z)
 		if z.num_bits > 63 {
 		if z.num_bits > 63 {
-			return 0, E_General.Stream_Too_Short
+			return 0, .Stream_Too_Short
 		}
 		}
 	}
 	}
 	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
 	#no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK]
@@ -361,7 +361,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 		if value < 256 {
 		if value < 256 {
 			e := write_byte(z, u8(value))
 			e := write_byte(z, u8(value))
 			if e != .None {
 			if e != .None {
-				return E_General.Output_Too_Short
+				return .Output_Too_Short
 			}
 			}
 		} else {
 		} else {
 			if value == 256 {
 			if value == 256 {
@@ -377,7 +377,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 
 
 			value, e = decode_huffman(z, z_offset)
 			value, e = decode_huffman(z, z_offset)
 			if e != nil {
 			if e != nil {
-				return E_Deflate.Bad_Huffman_Code
+				return .Bad_Huffman_Code
 			}
 			}
 
 
 			distance := Z_DIST_BASE[value]
 			distance := Z_DIST_BASE[value]
@@ -387,7 +387,7 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 
 
 			if z.bytes_written < i64(distance) {
 			if z.bytes_written < i64(distance) {
 				// Distance is longer than we've decoded so far.
 				// Distance is longer than we've decoded so far.
-				return E_Deflate.Bad_Distance
+				return .Bad_Distance
 			}
 			}
 
 
 			/*
 			/*
@@ -405,14 +405,14 @@ parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err:
 					c := z.output.buf[z.bytes_written - i64(distance)]
 					c := z.output.buf[z.bytes_written - i64(distance)]
 					e := repl_byte(z, length, c)
 					e := repl_byte(z, length, c)
 					if e != .None {
 					if e != .None {
-						return E_General.Output_Too_Short
+						return .Output_Too_Short
 					}
 					}
 				}
 				}
 			} else {
 			} else {
 				if length > 0 {
 				if length > 0 {
 					e := repl_bytes(z, length, distance)
 					e := repl_bytes(z, length, distance)
 					if e != .None {
 					if e != .None {
-						return E_General.Output_Too_Short
+						return .Output_Too_Short
 					}
 					}
 				}
 				}
 			}
 			}
@@ -432,25 +432,25 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 	if !raw {
 	if !raw {
 		size, size_err := compress.input_size(ctx)
 		size, size_err := compress.input_size(ctx)
 		if size < 6 || size_err != nil {
 		if size < 6 || size_err != nil {
-			return E_General.Stream_Too_Short
+			return .Stream_Too_Short
 		}
 		}
 
 
 		cmf, _ := compress.read_u8(ctx)
 		cmf, _ := compress.read_u8(ctx)
 
 
 		method := Compression_Method(cmf & 0xf)
 		method := Compression_Method(cmf & 0xf)
 		if method != .DEFLATE {
 		if method != .DEFLATE {
-			return E_General.Unknown_Compression_Method
+			return .Unknown_Compression_Method
 		}
 		}
 
 
 		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
 		if cinfo := (cmf >> 4) & 0xf; cinfo > 7 {
-			return E_ZLIB.Unsupported_Window_Size
+			return .Unsupported_Window_Size
 		}
 		}
 		flg, _ := compress.read_u8(ctx)
 		flg, _ := compress.read_u8(ctx)
 
 
 		fcheck := flg & 0x1f
 		fcheck := flg & 0x1f
 		fcheck_computed := (cmf << 8 | flg) & 0x1f
 		fcheck_computed := (cmf << 8 | flg) & 0x1f
 		if fcheck != fcheck_computed {
 		if fcheck != fcheck_computed {
-			return E_General.Checksum_Failed
+			return .Checksum_Failed
 		}
 		}
 
 
 		/*
 		/*
@@ -458,7 +458,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 			They're application specific and PNG doesn't use them.
 			They're application specific and PNG doesn't use them.
 		*/
 		*/
 		if fdict := (flg >> 5) & 1; fdict != 0 {
 		if fdict := (flg >> 5) & 1; fdict != 0 {
-			return E_ZLIB.FDICT_Unsupported
+			return .FDICT_Unsupported
 		}
 		}
 
 
 		// flevel  := Compression_Level((flg >> 6) & 3);
 		// flevel  := Compression_Level((flg >> 6) & 3);
@@ -485,7 +485,7 @@ inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := f
 		output_hash := hash.adler32(ctx.output.buf[:])
 		output_hash := hash.adler32(ctx.output.buf[:])
 
 
 		if output_hash != u32(adler) {
 		if output_hash != u32(adler) {
-			return E_General.Checksum_Failed
+			return .Checksum_Failed
 		}
 		}
 	}
 	}
 	return nil
 	return nil
@@ -538,23 +538,24 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 		final = compress.read_bits_lsb(z, 1)
 		final = compress.read_bits_lsb(z, 1)
 		type  = compress.read_bits_lsb(z, 2)
 		type  = compress.read_bits_lsb(z, 2)
 
 
-		// fmt.printf("Final: %v | Type: %v\n", final, type);
+		// fmt.printf("Final: %v | Type: %v\n", final, type)
 
 
 		switch type {
 		switch type {
 		case 0:
 		case 0:
+			// fmt.printf("Method 0: STORED\n")
 			// Uncompressed block
 			// Uncompressed block
 
 
 			// Discard bits until next byte boundary
 			// Discard bits until next byte boundary
 			compress.discard_to_next_byte_lsb(z)
 			compress.discard_to_next_byte_lsb(z)
 
 
-			uncompressed_len := i16(compress.read_bits_lsb(z, 16))
-			length_check     := i16(compress.read_bits_lsb(z, 16))
+			uncompressed_len := u16(compress.read_bits_lsb(z, 16))
+			length_check     := u16(compress.read_bits_lsb(z, 16))
 
 
-			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);
+			// fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check)
 
 
 
 
 			if ~uncompressed_len != length_check {
 			if ~uncompressed_len != length_check {
-				return E_Deflate.Len_Nlen_Mismatch
+				return .Len_Nlen_Mismatch
 			}
 			}
 
 
 			/*
 			/*
@@ -567,10 +568,12 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 				write_byte(z, u8(lit))
 				write_byte(z, u8(lit))
 				uncompressed_len -= 1
 				uncompressed_len -= 1
 			}
 			}
+			assert(uncompressed_len == 0)
+
 		case 3:
 		case 3:
-			return E_Deflate.BType_3
+			return .BType_3
 		case:
 		case:
-			// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
+			// fmt.printf("Err: %v | Final: %v | Type: %v\n", err, final, type)
 			if type == 1 {
 			if type == 1 {
 				// Use fixed code lengths.
 				// Use fixed code lengths.
 				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
 				build_huffman(z_repeat, Z_FIXED_LENGTH[:]) or_return
@@ -601,7 +604,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 					c = decode_huffman(z, codelength_ht) or_return
 					c = decode_huffman(z, codelength_ht) or_return
 
 
 					if c < 0 || c >= 19 {
 					if c < 0 || c >= 19 {
-						return E_Deflate.Huffman_Bad_Code_Lengths
+						return .Huffman_Bad_Code_Lengths
 					}
 					}
 					if c < 16 {
 					if c < 16 {
 						lencodes[n] = u8(c)
 						lencodes[n] = u8(c)
@@ -613,7 +616,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 						case 16:
 						case 16:
 							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
 							c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3)
 							if n == 0 {
 							if n == 0 {
-								return E_Deflate.Huffman_Bad_Code_Lengths
+								return .Huffman_Bad_Code_Lengths
 							}
 							}
 							fill = lencodes[n - 1]
 							fill = lencodes[n - 1]
 						case 17:
 						case 17:
@@ -621,11 +624,11 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 						case 18:
 						case 18:
 							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
 							c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11)
 						case:
 						case:
-								return E_Deflate.Huffman_Bad_Code_Lengths
+								return .Huffman_Bad_Code_Lengths
 						}
 						}
 
 
 						if ntot - n < u32(c) {
 						if ntot - n < u32(c) {
-							return E_Deflate.Huffman_Bad_Code_Lengths
+							return .Huffman_Bad_Code_Lengths
 						}
 						}
 
 
 						nc := n + u32(c)
 						nc := n + u32(c)
@@ -636,7 +639,7 @@ inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.all
 				}
 				}
 
 
 				if n != ntot {
 				if n != ntot {
-					return E_Deflate.Huffman_Bad_Code_Lengths
+					return .Huffman_Bad_Code_Lengths
 				}
 				}
 
 
 				build_huffman(z_repeat, lencodes[:hlit])     or_return
 				build_huffman(z_repeat, lencodes[:hlit])     or_return
@@ -674,4 +677,4 @@ inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := fals
 	return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
 	return inflate_raw(z=&ctx, expected_output_size=expected_output_size)
 }
 }
 
 
-inflate :: proc{inflate_from_context, inflate_from_byte_array};
+inflate :: proc{inflate_from_context, inflate_from_byte_array}

+ 0 - 216
core/container/array.odin

@@ -1,216 +0,0 @@
-package container
-
-import "core:mem"
-import "core:runtime"
-
-Array :: struct($T: typeid) {
-	data:      ^T,
-	len:       int,
-	cap:       int,
-	allocator: mem.Allocator,
-}
-
-ARRAY_DEFAULT_CAPACITY :: 16
-
-/*
-array_init :: proc {
-	array_init_none,
-	array_init_len,
-	array_init_len_cap,
-}
-array_init
-array_delete
-array_len
-array_cap
-array_space
-array_slice
-array_get
-array_get_ptr
-array_set
-array_reserve
-array_resize
-array_push = array_append :: proc{
-	array_push_back,
-	array_push_back_elems,
-}
-array_push_front
-array_pop_back
-array_pop_front
-array_consume
-array_trim
-array_clear
-array_clone
-array_set_capacity
-array_grow
-*/
-
-
-array_init_none :: proc(a: ^$A/Array, allocator := context.allocator) {
-	array_init_len_cap(a, 0, ARRAY_DEFAULT_CAPACITY, allocator)
-}
-array_init_len :: proc(a: ^$A/Array, len: int, allocator := context.allocator) {
-	array_init_len_cap(a, len, len, allocator)
-}
-array_init_len_cap :: proc(a: ^$A/Array($T), len: int, cap: int, allocator := context.allocator) {
-	a.allocator = allocator
-	a.data = (^T)(mem.alloc(size_of(T)*cap, align_of(T), a.allocator))
-	a.len = len
-	a.cap = cap
-}
-
-array_init :: proc{array_init_none, array_init_len, array_init_len_cap}
-
-array_delete :: proc(a: $A/Array) {
-	mem.free(a.data, a.allocator)
-}
-
-array_len :: proc(a: $A/Array) -> int {
-	return a.len
-}
-
-array_cap :: proc(a: $A/Array) -> int {
-	return a.cap
-}
-
-array_space :: proc(a: $A/Array) -> int {
-	return a.cap - a.len
-}
-
-array_slice :: proc(a: $A/Array($T)) -> []T {
-	s := mem.Raw_Slice{a.data, a.len}
-	return transmute([]T)s
-}
-
-array_cap_slice :: proc(a: $A/Array($T)) -> []T {
-	s := mem.Raw_Slice{a.data, a.cap}
-	return transmute([]T)s
-}
-
-array_get :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> T {
-	runtime.bounds_check_error_loc(loc, index, array_len(a))
-	return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^
-}
-array_get_ptr :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> ^T {
-	runtime.bounds_check_error_loc(loc, index, array_len(a))
-	return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))
-}
-
-array_set :: proc(a: ^$A/Array($T), index: int, item: T, loc := #caller_location)  {
-	runtime.bounds_check_error_loc(loc, index, array_len(a^))
-	(^T)(uintptr(a.data) + size_of(T)*uintptr(index))^ = item
-}
-
-
-array_reserve :: proc(a: ^$A/Array, capacity: int) {
-	if capacity > a.len {
-		array_set_capacity(a, capacity)
-	}
-}
-
-array_resize :: proc(a: ^$A/Array, length: int) {
-	if length > a.len {
-		array_set_capacity(a, length)
-	}
-	a.len = length
-}
-
-
-
-array_push_back :: proc(a: ^$A/Array($T), item: T) {
-	if array_space(a^) == 0 {
-		array_grow(a)
-	}
-
-	a.len += 1
-	array_set(a, a.len-1, item)
-}
-
-array_push_front :: proc(a: ^$A/Array($T), item: T) {
-	if array_space(a^) == 0 {
-		array_grow(a)
-	}
-
-	a.len += 1
-	data := array_slice(a^)
-	copy(data[1:], data[:])
-	data[0] = item
-}
-
-array_pop_back :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := array_get(a^, a.len-1)
-	a.len -= 1
-	return item
-}
-
-array_pop_front :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := array_get(a^, 0)
-	s := array_slice(a^)
-	copy(s[:], s[1:])
-	a.len -= 1
-	return item
-}
-
-
-array_consume :: proc(a: ^$A/Array($T), count: int, loc := #caller_location) {
-	assert(condition=a.len >= count, loc=loc)
-	a.len -= count
-}
-
-
-array_trim :: proc(a: ^$A/Array($T)) {
-	array_set_capacity(a, a.len)
-}
-
-array_clear :: proc(a: ^$A/Array($T)) {
-	array_resize(a, 0)
-}
-
-array_clone :: proc(a: $A/Array($T), allocator := context.allocator) -> A {
-	res: A
-	array_init(&res, array_len(a), array_len(a), allocator)
-	copy(array_slice(res), array_slice(a))
-	return res
-}
-
-array_push_back_elems :: proc(a: ^$A/Array($T), items: ..T) {
-	if array_space(a^) < len(items) {
-		array_grow(a, a.len + len(items))
-	}
-	offset := a.len
-	data := array_cap_slice(a^)
-	n := copy(data[a.len:], items)
-	a.len += n
-}
-
-array_push   :: proc{array_push_back, array_push_back_elems}
-array_append :: proc{array_push_back, array_push_back_elems}
-
-array_set_capacity :: proc(a: ^$A/Array($T), new_capacity: int) {
-	if new_capacity == a.cap {
-		return
-	}
-
-	if new_capacity < a.len {
-		array_resize(a, new_capacity)
-	}
-
-	new_data: ^T
-	if new_capacity > 0 {
-		if a.allocator.procedure == nil {
-			a.allocator = context.allocator
-		}
-		new_data = (^T)(mem.alloc(size_of(T)*new_capacity, align_of(T), a.allocator))
-		if new_data != nil {
-			mem.copy(new_data, a.data, size_of(T)*a.len)
-		}
-	}
-	mem.free(a.data, a.allocator)
-	a.data = new_data
-	a.cap = new_capacity
-}
-array_grow :: proc(a: ^$A/Array, min_capacity: int = 0) {
-	new_capacity := max(array_len(a^)*2 + 8, min_capacity)
-	array_set_capacity(a, new_capacity)
-}

+ 239 - 0
core/container/bit_array/bit_array.odin

@@ -0,0 +1,239 @@
+package dynamic_bit_array
+
+import "core:intrinsics"
+import "core:mem"
+
+/*
+	Note that these constants are dependent on the backing being a u64.
+*/
+@(private="file")
+INDEX_SHIFT :: 6
+
+@(private="file")
+INDEX_MASK  :: 63
+
+@(private="file")
+NUM_BITS :: 64
+
+Bit_Array :: struct {
+	bits:         [dynamic]u64,
+	bias:         int,
+	max_index:    int,
+	free_pointer: bool,
+}
+
+Bit_Array_Iterator :: struct {
+	array:    ^Bit_Array,
+	word_idx: int,
+	bit_idx:  uint,
+}
+
+/*
+	In:
+		- ba:   ^Bit_Array - the array to iterate over
+
+	Out:
+		- it:   ^Bit_Array_Iterator - the iterator that holds iteration state
+*/
+make_iterator :: proc (ba: ^Bit_Array) -> (it: Bit_Array_Iterator) {
+	return Bit_Array_Iterator { array = ba }
+}
+
+/*
+	In:
+		- it:    ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- set:    bool - the state of the bit at `index`
+		- index:  int - the next bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more bits
+*/
+iterate_by_all :: proc (it: ^Bit_Array_Iterator) -> (set: bool, index: int, ok: bool) {
+	index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+	if index > it.array.max_index { return false, 0, false }
+
+	word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+	set = (word >> it.bit_idx & 1) == 1
+
+	it.bit_idx += 1
+	if it.bit_idx >= NUM_BITS {
+		it.bit_idx = 0
+		it.word_idx += 1
+	}
+
+	return set, index, true
+}
+
+/*
+	In:
+		- it:     ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- index:  int - the next set bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more bits set
+*/
+iterate_by_set :: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+	return iterate_internal_(it, true)
+}
+
+/*
+	In:
+		- it:	  ^Bit_Array_Iterator - the iterator struct that holds the state.
+
+	Out:
+		- index:  int - the next unset bit of the Bit_Array referenced by `it`.
+		- ok:	  bool - `true` if the iterator returned a valid index,
+			  `false` if there were no more unset bits
+*/
+iterate_by_unset:: proc (it: ^Bit_Array_Iterator) -> (index: int, ok: bool) {
+	return iterate_internal_(it, false)
+}
+
+@(private="file")
+iterate_internal_ :: proc (it: ^Bit_Array_Iterator, $ITERATE_SET_BITS: bool) -> (index: int, ok: bool) {
+	word := it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+	when ! ITERATE_SET_BITS { word = ~word }
+
+	// if the word is empty or we have already gone over all the bits in it,
+	// it.bit_idx is greater than the index of any set bit in the word,
+	// meaning that word >> it.bit_idx == 0.
+	for it.word_idx < len(it.array.bits) && word >> it.bit_idx == 0 {
+		it.word_idx += 1
+		it.bit_idx = 0
+		word = it.array.bits[it.word_idx] if len(it.array.bits) > it.word_idx else 0
+		when ! ITERATE_SET_BITS { word = ~word }
+	}
+
+	// if we are iterating the set bits, reaching the end of the array means we have no more bits to check
+	when ITERATE_SET_BITS {
+		if it.word_idx >= len(it.array.bits) {
+			return 0, false
+		}
+	}
+
+	// reaching here means that the word has some set bits
+	it.bit_idx += uint(intrinsics.count_trailing_zeros(word >> it.bit_idx))
+	index = it.word_idx * NUM_BITS + int(it.bit_idx) + it.array.bias
+
+	it.bit_idx += 1
+	if it.bit_idx >= NUM_BITS {
+		it.bit_idx = 0
+		it.word_idx += 1
+	}
+	return index, index <= it.array.max_index
+}
+
+
+/*
+	In:
+		- ba:    ^Bit_Array - a pointer to the Bit Array
+		- index: The bit index. Can be an enum member.
+
+	Out:
+		- res:   The bit you're interested in.
+		- ok:    Whether the index was valid. Returns `false` if the index is smaller than the bias.
+
+	The `ok` return value may be ignored.
+*/
+get :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (res: bool, ok: bool) {
+	if ba == nil || int(index) < ba.bias { return false, false }
+
+	idx := int(index) - ba.bias
+	context.allocator = allocator
+
+	leg_index := idx >> INDEX_SHIFT
+	bit_index := idx &  INDEX_MASK
+
+	/*
+		If we `get` a bit that doesn't fit in the Bit Array, it's naturally `false`.
+		This early-out prevents unnecessary resizing.
+	*/
+	if leg_index + 1 > len(ba.bits) { return false, true }
+
+	val := u64(1 << uint(bit_index))
+	res = ba.bits[leg_index] & val == val
+
+	return res, true
+}
+
+/*
+	In:
+		- ba:    ^Bit_Array - a pointer to the Bit Array
+		- index: The bit index. Can be an enum member.
+
+	Out:
+		- ok:    Whether or not we managed to set requested bit.
+
+	`set` automatically resizes the Bit Array to accommodate the requested index if needed.
+*/
+set :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (ok: bool) {
+
+	if ba == nil || int(index) < ba.bias { return false }
+
+	idx := int(index) - ba.bias
+	context.allocator = allocator
+
+	leg_index := idx >> INDEX_SHIFT
+	bit_index := idx &  INDEX_MASK
+
+	resize_if_needed(ba, leg_index) or_return
+
+	ba.max_index = max(idx, ba.max_index)
+	ba.bits[leg_index] |= 1 << uint(bit_index)
+	return true
+}
+
+/*
+	A helper function to create a Bit Array with optional bias, in case your smallest index is non-zero (including negative).
+*/
+create :: proc(max_index: int, min_index := 0, allocator := context.allocator) -> (res: ^Bit_Array, ok: bool) #optional_ok {
+	context.allocator = allocator
+	size_in_bits := max_index - min_index
+
+	if size_in_bits < 1 { return {}, false }
+
+	legs := size_in_bits >> INDEX_SHIFT
+
+	res = new(Bit_Array)
+	res.bias         = min_index
+	res.max_index    = max_index
+	res.free_pointer = true
+	return res, resize_if_needed(res, legs)
+}
+
+/*
+	Sets all bits to `false`.
+*/
+clear :: proc(ba: ^Bit_Array) {
+	if ba == nil { return }
+	mem.zero_slice(ba.bits[:])
+}
+
+/*
+	Releases the memory used by the Bit Array.
+*/
+destroy :: proc(ba: ^Bit_Array) {
+	if ba == nil { return }
+	delete(ba.bits)
+	if ba.free_pointer { // Only free if this Bit_Array was created using `create`, not when on the stack.
+		free(ba)
+	}
+}
+
+/*
+	Resizes the Bit Array. For internal use.
+	If you want to reserve the memory for a given-sized Bit Array up front, you can use `create`.
+*/
+@(private="file")
+resize_if_needed :: proc(ba: ^Bit_Array, legs: int, allocator := context.allocator) -> (ok: bool) {
+	if ba == nil { return false }
+
+	context.allocator = allocator
+
+	if legs + 1 > len(ba.bits) {
+		resize(&ba.bits, legs + 1)
+	}
+	return len(ba.bits) > legs
+}
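
Editorial note (not part of this changeset): a minimal usage sketch for the iterator procedures added above, modelled on the examples in doc.odin below. It assumes the package is imported as `bit_array` (the default name for "core:container/bit_array"); the `package test` and `main` names are illustrative only.

	package test

	import "core:fmt"
	import "core:container/bit_array"

	main :: proc() {
		using bit_array

		bits: Bit_Array
		defer destroy(&bits)

		set(&bits, 4)
		set(&bits, 130)

		it := make_iterator(&bits)
		for {
			index, ok := iterate_by_set(&it)
			if !ok { break }
			fmt.println(index) // prints 4, then 130
		}
	}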

+ 53 - 0
core/container/bit_array/doc.odin

@@ -0,0 +1,53 @@
+package dynamic_bit_array
+
+/*
+	The Bit Array can be used in several ways:
+
+	-- By default you don't need to instantiate a Bit Array:
+
+		package test
+
+		import "core:fmt"
+		import "core:container/bit_array"
+
+		main :: proc() {
+			using bit_array
+
+			bits: Bit_Array
+
+			// returns `true`
+			fmt.println(set(&bits, 42))
+
+			// returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+			was_set, was_retrieved := get(&bits, -1)
+			fmt.println(was_set, was_retrieved) 
+			destroy(&bits)
+		}
+
+	-- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
+
+		package test
+
+		import "core:fmt"
+		import "core:container/bit_array"
+
+		main :: proc() {
+			Foo :: enum int {
+				Negative_Test = -42,
+				Bar           = 420,
+				Leaves        = 69105,
+			}
+
+			using bit_array
+
+			bits := create(int(max(Foo)), int(min(Foo)))
+			defer destroy(bits)
+
+			fmt.printf("Set(Bar):           %v\n",     set(bits, Foo.Bar))
+			fmt.printf("Get(Bar):           %v, %v\n", get(bits, Foo.Bar))
+			fmt.printf("Set(Negative_Test): %v\n",     set(bits, Foo.Negative_Test))
+			fmt.printf("Get(Leaves):        %v, %v\n", get(bits, Foo.Leaves))
+			fmt.printf("Get(Negative_Test): %v, %v\n", get(bits, Foo.Negative_Test))
+			fmt.printf("Freed.\n")
+		}
+*/

+ 0 - 80
core/container/bloom_filter.odin

@@ -1,80 +0,0 @@
-package container
-
-import "core:mem"
-
-Bloom_Hash_Proc :: #type proc(data: []byte) -> u32
-
-Bloom_Hash :: struct {
-	hash_proc: Bloom_Hash_Proc,
-	next:     ^Bloom_Hash,
-}
-
-Bloom_Filter :: struct {
-	allocator: mem.Allocator,
-	hash:      ^Bloom_Hash,
-	bits:      []byte,
-}
-
-bloom_filter_init :: proc(b: ^Bloom_Filter, size: int, allocator := context.allocator) {
-	b.allocator = allocator
-	b.bits = make([]byte, size, allocator)
-}
-
-bloom_filter_destroy :: proc(b: ^Bloom_Filter) {
-	context.allocator = b.allocator
-	delete(b.bits)
-	for b.hash != nil {
-		hash := b.hash
-		b.hash = b.hash.next
-		free(hash)
-	}
-}
-
-bloom_filter_add_hash_proc :: proc(b: ^Bloom_Filter, hash_proc: Bloom_Hash_Proc) {
-	context.allocator = b.allocator
-	h := new(Bloom_Hash)
-	h.hash_proc = hash_proc
-
-	head := &b.hash
-	for head^ != nil {
-		head = &(head^.next)
-	}
-	head^ = h
-}
-
-bloom_filter_add :: proc(b: ^Bloom_Filter, item: []byte) {
-	#no_bounds_check for h := b.hash; h != nil; h = h.next {
-		hash := h.hash_proc(item)
-		hash %= u32(len(b.bits) * 8)
-		b.bits[hash >> 3] |= 1 << (hash & 3)
-	}
-}
-
-bloom_filter_add_string :: proc(b: ^Bloom_Filter, item: string) {
-	bloom_filter_add(b, transmute([]byte)item)
-}
-
-bloom_filter_add_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) {
-	item := mem.slice_ptr((^byte)(data), size)
-	bloom_filter_add(b, item)
-}
-
-bloom_filter_test :: proc(b: ^Bloom_Filter, item: []byte) -> bool {
-	#no_bounds_check for h := b.hash; h != nil; h = h.next {
-		hash := h.hash_proc(item)
-		hash %= u32(len(b.bits) * 8)
-		if (b.bits[hash >> 3] & (1 << (hash & 3)) == 0) {
-			return false
-		}
-	}
-	return true
-}
-
-bloom_filter_test_string :: proc(b: ^Bloom_Filter, item: string) -> bool {
-	return bloom_filter_test(b, transmute([]byte)item)
-}
-
-bloom_filter_test_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) -> bool {
-	item := mem.slice_ptr((^byte)(data), size)
-	return bloom_filter_test(b, item)
-}

+ 201 - 0
core/container/lru/lru_cache.odin

@@ -0,0 +1,201 @@
+package container_lru
+
+import "core:runtime"
+import "core:intrinsics"
+_ :: runtime
+_ :: intrinsics
+
+Node :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+	prev, next: ^Node(Key, Value),
+	key:   Key,
+	value: Value,
+}
+
+// Cache is an LRU cache. It automatically removes entries as new entries are
+// added if the capacity is reached. Entries are removed based on how recently
+// they were used where the oldest entries are removed first.
+Cache :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
+	head: ^Node(Key, Value),
+	tail: ^Node(Key, Value),
+
+	entries: map[Key]^Node(Key, Value),
+
+	count:    int,
+	capacity: int,
+
+	node_allocator: runtime.Allocator,
+
+	on_remove: proc(key: Key, value: Value, user_data: rawptr),
+	on_remove_user_data: rawptr,
+}
+
+// init initializes a Cache
+init :: proc(c: ^$C/Cache($Key, $Value), capacity: int, entries_allocator := context.allocator, node_allocator := context.allocator) {
+	c.entries.allocator = entries_allocator
+	c.node_allocator = node_allocator
+	c.capacity = capacity
+}
+
+// destroy deinitializes a Cache
+destroy :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+	clear(c, call_on_remove)
+	delete(c.entries)
+}
+
+// clear the contents of a Cache
+clear :: proc(c: ^$C/Cache($Key, $Value), call_on_remove: bool) {
+	for _, node in c.entries {
+		if call_on_remove {
+			_call_on_remove(c, node)
+		}
+		free(node, c.node_allocator)
+	}
+	runtime.clear(&c.entries)
+	c.head = nil
+	c.tail = nil
+	c.count = 0
+}
+
+// set the given key value pair. This operation updates the recent usage of the item.
+set :: proc(c: ^$C/Cache($Key, $Value), key: Key, value: Value) -> runtime.Allocator_Error {
+	if e, ok := c.entries[key]; ok {
+		e.value = value
+		_pop_node(c, e)
+		_push_front_node(c, e)
+		return nil
+	}
+
+	e : ^Node(Key, Value) = nil
+	assert(c.count <= c.capacity)
+	if c.count == c.capacity {
+		e = c.tail
+		_remove_node(c, e)
+	}
+	else {
+		c.count += 1
+		e = new(Node(Key, Value), c.node_allocator) or_return
+	}
+
+	e.key = key
+	e.value = value
+	_push_front_node(c, e)
+	c.entries[key] = e
+
+	return nil
+}
+
+// get a value from the cache from a given key. This operation updates the usage of the item.
+get :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	_pop_node(c, e)
+	_push_front_node(c, e)
+	return e.value, true
+}
+
+// get_ptr gets the pointer to a value the cache from a given key. This operation updates the usage of the item.
+get_ptr :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: ^Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	_pop_node(c, e)
+	_push_front_node(c, e)
+	return &e.value, true
+}
+
+// peek gets the value from the cache from a given key without updating the recent usage.
+peek :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> (value: Value, ok: bool) #optional_ok {
+	e: ^Node(Key, Value)
+	e, ok = c.entries[key]
+	if !ok {
+		return
+	}
+	return e.value, true
+}
+
+// exists checks for the existence of a value from a given key without updating the recent usage.
+exists :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+	return key in c.entries
+}
+
+// remove removes an item from the cache.
+remove :: proc(c: ^$C/Cache($Key, $Value), key: Key) -> bool {
+	e, ok := c.entries[key]
+	if !ok {
+		return false
+	}
+	_remove_node(c, e)
+	free(e, c.node_allocator)
+	c.count -= 1
+	return true
+}
+
+
+@(private)
+_remove_node :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+	if c.head == node {
+		c.head = node.next
+	}
+	if c.tail == node {
+		c.tail = node.prev
+	}
+	if node.prev != nil {
+		node.prev.next = node.next
+	}
+	if node.next != nil {
+		node.next.prev = node.prev
+	}
+	node.prev = nil
+	node.next = nil
+
+	delete_key(&c.entries, node.key)
+
+	_call_on_remove(c, node)
+}
+
+@(private)
+_call_on_remove :: proc(c: ^$C/Cache($Key, $Value), node: ^Node(Key, Value)) {
+	if c.on_remove != nil {
+		c.on_remove(node.key, node.value, c.on_remove_user_data)
+	}
+}
+
+@(private)
+_push_front_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+	if c.head != nil {
+		e.next = c.head
+		e.next.prev = e
+	}
+	c.head = e
+	if c.tail == nil {
+		c.tail = e
+	}
+	e.prev = nil
+}
+
+@(private)
+_pop_node :: proc(c: ^$C/Cache($Key, $Value), e: ^Node(Key, Value)) {
+	if e == nil {
+		return
+	}
+	if c.head == e {
+		c.head = e.next
+	}
+	if c.tail == e {
+		c.tail = e.prev
+	}
+	if e.prev != nil {
+		e.prev.next = e.next
+	}
+
+	if e.next != nil {
+		e.next.prev = e.prev
+	}
+	e.prev = nil
+	e.next = nil
+}
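
Editorial note (not part of this changeset): a minimal usage sketch for the new LRU cache, assuming it is imported as `lru` from "core:container/lru"; the `package test` and `main` names are illustrative only.

	package test

	import "core:fmt"
	import "core:container/lru"

	main :: proc() {
		cache: lru.Cache(string, int)
		lru.init(&cache, 2)
		defer lru.destroy(&cache, false)

		lru.set(&cache, "a", 1)
		lru.set(&cache, "b", 2)
		_ = lru.get(&cache, "a") // "a" becomes the most recently used entry
		lru.set(&cache, "c", 3)  // capacity reached: evicts "b", the least recently used

		fmt.println(lru.exists(&cache, "b")) // false
		v, _ := lru.get(&cache, "a")
		fmt.println(v)                       // 1
	}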

+ 0 - 377
core/container/map.odin

@@ -1,377 +0,0 @@
-package container
-
-import "core:intrinsics"
-_ :: intrinsics
-
-
-Map :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
-	hash: Array(int),
-	entries: Array(Map_Entry(Key, Value)),
-}
-
-Map_Entry :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
-	hash:  uintptr,
-	next:  int,
-	key:   Key,
-	value: Value,
-}
-
-
-/*
-map_init :: proc{
-	map_init_none,
-	map_init_cap,
-}
-map_delete
-
-map_has
-map_get
-map_get_default
-map_get_ptr
-map_set
-map_remove
-map_reserve
-map_clear
-
-// Multi Map
-
-multi_map_find_first
-multi_map_find_next
-multi_map_count
-multi_map_get :: proc{
-	multi_map_get_array,
-	multi_map_get_slice,
-};
-multi_map_get_as_slice
-multi_map_insert
-multi_map_remove
-multi_map_remove_all
-
-*/
-
-map_init :: proc{map_init_none, map_init_cap}
-
-map_init_none :: proc(m: ^$M/Map($Key, $Value), allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-}
-
-map_init_cap :: proc(m: ^$M/Map($Key, $Value), cap: int, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-	map_reserve(m, cap)
-}
-
-map_delete :: proc(m: $M/Map($Key, $Value)) {
-	array_delete(m.hash)
-	array_delete(m.entries)
-}
-
-
-map_has :: proc(m: $M/Map($Key, $Value), key: Key) -> bool {
-	return _map_find_or_fail(m, key) >= 0
-}
-
-map_get :: proc(m: $M/Map($Key, $Value), key: Key) -> (res: Value, ok: bool) #optional_ok {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return {}, false
-	}
-	return array_get(m.entries, i).value, true
-}
-
-map_get_default :: proc(m: $M/Map($Key, $Value), key: Key, default: Value) -> (res: Value, ok: bool) #optional_ok {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return default, false
-	}
-	return array_get(m.entries, i).value, true
-}
-
-map_get_ptr :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Value {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return nil
-	}
-	return array_get_ptr(m.entries, i).value
-}
-
-map_set :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
-	if array_len(m.hash) == 0 {
-		_map_grow(m)
-	}
-
-	i := _map_find_or_make(m, key)
-	array_get_ptr(m.entries, i).value = value
-	if _map_full(m^) {
-		_map_grow(m)
-	}
-}
-
-map_remove :: proc(m: ^$M/Map($Key, $Value), key: Key) {
-	fr := _map_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		_map_erase(m, fr)
-	}
-}
-
-
-map_reserve :: proc(m: ^$M/Map($Key, $Value), new_size: int) {
-	nm: M
-	map_init(&nm, m.hash.allocator)
-	array_resize(&nm.hash, new_size)
-	array_reserve(&nm.entries, array_len(m.entries))
-
-	for i in 0..<new_size {
-		array_set(&nm.hash, i, -1)
-	}
-	for i in 0..<array_len(m.entries) {
-		e := array_get(m.entries, i)
-		multi_map_insert(&nm, e.key, e.value)
-	}
-
-	map_delete(m^)
-	m^ = nm
-}
-
-map_clear :: proc(m: ^$M/Map($Key, $Value)) {
-	array_clear(&m.hash)
-	array_clear(&m.entries)
-}
-
-
-
-multi_map_find_first :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Map_Entry(Key, Value) {
-	i := _map_find_or_fail(m, key)
-	if i < 0 {
-		return nil
-	}
-	return array_get_ptr(m.entries, i)
-}
-
-multi_map_find_next :: proc(m: $M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> ^Map_Entry(Key, Value) {
-	i := e.next
-	for i >= 0 {
-		it := array_get_ptr(m.entries, i)
-		if it.hash == e.hash && it.key == e.key {
-			return it
-		}
-		i = it.next
-	}
-	return nil
-}
-
-multi_map_count :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
-	n := 0
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		n += 1
-		e = multi_map_find_next(m, e)
-	}
-	return n
-}
-
-multi_map_get :: proc{multi_map_get_array, multi_map_get_slice}
-
-multi_map_get_array :: proc(m: $M/Map($Key, $Value), key: Key, items: ^Array(Value)) {
-	if items == nil {
-		return
-	}
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		array_append(items, e.value)
-		e = multi_map_find_next(m, e)
-	}
-}
-
-multi_map_get_slice :: proc(m: $M/Map($Key, $Value), key: Key, items: []Value) {
-	e := multi_map_find_first(m, key)
-	i := 0
-	for e != nil && i < len(items) {
-		items[i] = e.value
-		i += 1
-		e = multi_map_find_next(m, e)
-	}
-}
-
-multi_map_get_as_slice :: proc(m: $M/Map($Key, $Value), key: Key) -> []Value {
-	items: Array(Value)
-	array_init(&items, 0)
-
-	e := multi_map_find_first(m, key)
-	for e != nil {
-		array_append(&items, e.value)
-		e = multi_map_find_next(m, e)
-	}
-
-	return array_slice(items)
-}
-
-
-multi_map_insert :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
-	if array_len(m.hash) == 0 {
-		_map_grow(m)
-	}
-
-	i := _map_make(m, key)
-	array_get_ptr(m.entries, i).value = value
-	if _map_full(m^) {
-		_map_grow(m)
-	}
-}
-
-multi_map_remove :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) {
-	fr := _map_find_entry(m, e)
-	if fr.entry_index >= 0 {
-		_map_erase(m, fr)
-	}
-}
-
-multi_map_remove_all :: proc(m: ^$M/Map($Key, $Value), key: Key) {
-	for map_exist(m^, key) {
-		map_remove(m, key)
-	}
-}
-
-
-/// Internal
-
-
-Map_Find_Result :: struct {
-	hash_index:  int,
-	entry_prev:  int,
-	entry_index: int,
-}
-
-_map_add_entry :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int where intrinsics.type_is_valid_map_key(Key) {
-	hasher := intrinsics.type_hasher_proc(Key)
-
-	e: Map_Entry(Key, Value)
-	e.key = key
-	e.hash = hasher(&e.key, 0)
-	e.next = -1
-	idx := array_len(m.entries)
-	array_push(&m.entries, e)
-	return idx
-}
-
-_map_erase :: proc(m: ^$M/Map, fr: Map_Find_Result) {
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
-	}
-
-	if fr.entry_index == array_len(m.entries)-1 {
-		array_pop_back(&m.entries)
-		return
-	}
-
-	array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
-	last := _map_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
-	if last.entry_prev < 0 {
-		array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
-	} else {
-		array_set(&m.hash, last.hash_index, fr.entry_index)
-	}
-}
-
-
-_map_find_key :: proc(m: $M/Map($Key, $Value), key: Key) -> Map_Find_Result where intrinsics.type_is_valid_map_key(Key) {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	hasher := intrinsics.type_hasher_proc(Key)
-
-	key := key
-	hash := hasher(&key, 0)
-
-	fr.hash_index = int(hash % uintptr(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it.hash == hash && it.key == key {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_map_find_entry :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(e.hash % uintptr(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it == e {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_map_find_or_fail :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
-	return _map_find_key(m, key).entry_index
-}
-_map_find_or_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
-	fr := _map_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		return fr.entry_index
-	}
-
-	i := _map_add_entry(m, key)
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-	return i
-}
-
-
-_map_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
-	fr := _map_find_key(m^, key)
-	i := _map_add_entry(m, key)
-
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-
-	array_get_ptr(m.entries, i).next = fr.entry_index
-
-	return i
-}
-
-
-_map_full :: proc(m: $M/Map($Key, $Value)) -> bool {
-	// TODO(bill): Determine good max load factor
-	return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_map_grow :: proc(m: ^$M/Map($Key, $Value)) {
-	new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
-	map_reserve(m, new_size)
-}
-
-

+ 0 - 121
core/container/priority_queue.odin

@@ -1,121 +0,0 @@
-package container
-
-Priority_Queue :: struct($T: typeid) {
-	data: Array(T),
-	len: int,
-	priority: proc(item: T) -> int,
-}
-
-priority_queue_init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
-	queue_init_len(q, f, 0, allocator)
-}
-priority_queue_init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
-	queue_init_len_cap(q, f, 0, 16, allocator)
-}
-priority_queue_init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
-	array_init(&q.data, len, cap, allocator)
-	q.len = len
-	q.priority = f
-}
-
-priority_queue_init :: proc{priority_queue_init_none, priority_queue_init_len, priority_queue_init_len_cap}
-
-
-priority_queue_delete :: proc(q: $Q/Priority_Queue($T)) {
-	array_delete(q.data)
-}
-
-priority_queue_clear :: proc(q: ^$Q/Priority_Queue($T)) {
-	q.len = 0
-}
-
-priority_queue_len :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return q.len
-}
-
-priority_queue_cap :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return array_cap(q.data)
-}
-
-priority_queue_space :: proc(q: $Q/Priority_Queue($T)) -> int {
-	return array_len(q.data) - q.len
-}
-
-priority_queue_reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
-	if capacity > q.len {
-		array_resize(&q.data, new_capacity)
-	}
-}
-
-priority_queue_resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
-	if length > q.len {
-		array_resize(&q.data, new_capacity)
-	}
-	q.len = length
-}
-
-_priority_queue_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 0) {
-	new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
-	array_resize(&q.data, new_capacity)
-}
-
-
-priority_queue_push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
-	if array_len(q.data) - q.len == 0 {
-		_priority_queue_grow(q)
-	}
-
-	s := array_slice(q.data)
-	s[q.len] = item
-
-	i := q.len
-	for i > 0 {
-		p := (i - 1) / 2
-		if q.priority(s[p]) <= q.priority(item) { 
-			break 
-		}
-		s[i] = s[p]
-		i = p
-	}
-
-	q.len += 1
-	if q.len > 0 { 
-		s[i] = item 
-	} 
-}
-
-
-
-priority_queue_pop :: proc(q: ^$Q/Priority_Queue($T)) -> T {
-	assert(q.len > 0)
-
-	s := array_slice(q.data)
-	min := s[0]
-	root := s[q.len-1]
-	q.len -= 1
-
-	i := 0
-	for i * 2 + 1 < q.len {
-		a := i * 2 + 1
-		b := i * 2 + 2
-		c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
-
-		if q.priority(s[c]) >= q.priority(root) {
-			break
-		}
-		s[i] = s[c]
-		i = c
-	}
-
-	if q.len > 0 {
-		s[i] = root
-	}
-	return min
-}
-
-priority_queue_peek :: proc(q: ^$Q/Priority_Queue($T)) -> T {
-	assert(q.len > 0)
-
-	s := array_slice(q.data)
-	return s[0]
-}

+ 143 - 0
core/container/priority_queue/priority_queue.odin

@@ -0,0 +1,143 @@
+package container_priority_queue
+
+import "core:builtin"
+
+Priority_Queue :: struct($T: typeid) {
+	queue: [dynamic]T,
+	
+	less:  proc(a, b: T) -> bool,
+	swap:  proc(q: []T, i, j: int),
+}
+
+DEFAULT_CAPACITY :: 16
+
+default_swap_proc :: proc($T: typeid) -> proc(q: []T, i, j: int) {
+	return proc(q: []T, i, j: int) {
+		q[i], q[j] = q[j], q[i]
+	}
+}
+
+init :: proc(pq: ^$Q/Priority_Queue($T), less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int), capacity := DEFAULT_CAPACITY, allocator := context.allocator) {
+	if pq.queue.allocator.procedure == nil {
+		pq.queue.allocator = allocator
+	}
+	reserve(pq, capacity)
+	pq.less = less
+	pq.swap = swap
+}
+
+init_from_dynamic_array :: proc(pq: ^$Q/Priority_Queue($T), queue: [dynamic]T, less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int)) {
+	pq.queue = queue
+	pq.less = less
+	pq.swap = swap
+	n := builtin.len(pq.queue)
+	for i := n/2 - 1; i >= 0; i -= 1 {
+		_shift_down(pq, i, n)
+	}
+}
+
+destroy :: proc(pq: ^$Q/Priority_Queue($T)) {
+	clear(pq)
+	delete(pq.queue)
+}
+
+reserve :: proc(pq: ^$Q/Priority_Queue($T), capacity: int) {
+	builtin.reserve(&pq.queue, capacity)
+}
+clear :: proc(pq: ^$Q/Priority_Queue($T)) {
+	builtin.clear(&pq.queue)
+}
+len :: proc(pq: $Q/Priority_Queue($T)) -> int {
+	return builtin.len(pq.queue)
+}
+cap :: proc(pq: $Q/Priority_Queue($T)) -> int {
+	return builtin.cap(pq.queue)
+}
+
+_shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
+	// O(log n)
+	if 0 > i0 || i0 > n {
+		return false
+	}
+	
+	i := i0
+	queue := pq.queue[:]
+	
+	for {
+		j1 := 2*i + 1
+		if j1 < 0 || j1 >= n {
+			break
+		}
+		j := j1
+		if j2 := j1+1; j2 < n && pq.less(queue[j2], queue[j1]) {
+			j = j2
+		}
+		if !pq.less(queue[j], queue[i]) {
+			break
+		}
+		
+		pq.swap(queue, i, j)
+		i = j
+	}
+	return i > i0
+}
+
+_shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
+	j := j
+	queue := pq.queue[:]
+	n := builtin.len(queue)
+	for 0 <= j {
+		i := (j-1)/2
+		if i == j || !pq.less(queue[j], queue[i]) {
+			break
+		}
+		pq.swap(queue, i, j)
+		j = i
+	}
+}
+
+// NOTE(bill): When an element at index 'i' has changed its value, this will fix
+// the heap ordering. This uses basic shift-up and shift-down steps, as in heapsort.
+fix :: proc(pq: ^$Q/Priority_Queue($T), i: int) {
+	if !_shift_down(pq, i, builtin.len(pq.queue)) {
+		_shift_up(pq, i)
+	}
+}
+
+push :: proc(pq: ^$Q/Priority_Queue($T), value: T) {
+	append(&pq.queue, value)
+	_shift_up(pq, builtin.len(pq.queue)-1)
+}
+
+pop :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T) {
+	assert(condition=builtin.len(pq.queue)>0, loc=loc)
+	
+	n := builtin.len(pq.queue)-1
+	pq.swap(pq.queue[:], 0, n)
+	_shift_down(pq, 0, n)
+	return builtin.pop(&pq.queue)
+}
+
+pop_safe :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T, ok: bool) {
+	if builtin.len(pq.queue) > 0 {
+		n := builtin.len(pq.queue)-1
+		pq.swap(pq.queue[:], 0, n)
+		_shift_down(pq, 0, n)
+		return builtin.pop_safe(&pq.queue)
+	}
+	return
+}
+
+remove :: proc(pq: ^$Q/Priority_Queue($T), i: int) -> (value: T, ok: bool) {
+	n := builtin.len(pq.queue)
+	if 0 <= i && i < n {
+		if i != n-1 {
+			pq.swap(pq.queue[:], i, n-1)
+			_shift_down(pq, i, n-1)
+			_shift_up(pq, i)
+		}
+		value, ok = builtin.pop_safe(&pq.queue)
+	}
+	return
+}
+
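
Editorial note (not part of this changeset): a minimal usage sketch for the new priority queue, assuming the import alias `pq` for "core:container/priority_queue"; the `package test` and `main` names are illustrative only.

	package test

	import "core:fmt"
	import pq "core:container/priority_queue"

	main :: proc() {
		q: pq.Priority_Queue(int)
		pq.init(&q, proc(a, b: int) -> bool { return a < b }, pq.default_swap_proc(int))
		defer pq.destroy(&q)

		pq.push(&q, 3)
		pq.push(&q, 1)
		pq.push(&q, 2)

		fmt.println(pq.pop(&q)) // 1, since `less` defines a min-heap and the smallest value pops first
		fmt.println(pq.pop(&q)) // 2
	}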

+ 0 - 175
core/container/queue.odin

@@ -1,175 +0,0 @@
-package container
-
-Queue :: struct($T: typeid) {
-	data: Array(T),
-	len: int,
-	offset: int,
-}
-
-/*
-queue_init :: proc{
-	queue_init_none,
-	queue_init_len,
-	queue_init_len_cap,
-}
-queue_delete
-queue_clear
-queue_len
-queue_cap
-queue_space
-queue_get
-queue_set
-queue_reserve
-queue_resize
-queue_push :: proc{
-	queue_push_back, 
-	queue_push_elems,
-};
-queue_push_front
-queue_pop_front
-queue_pop_back
-queue_consume
-*/
-
-queue_init_none :: proc(q: ^$Q/Queue($T), allocator := context.allocator) {
-	queue_init_len(q, 0, allocator)
-}
-queue_init_len :: proc(q: ^$Q/Queue($T), len: int, allocator := context.allocator) {
-	queue_init_len_cap(q, 0, 16, allocator)
-}
-queue_init_len_cap :: proc(q: ^$Q/Queue($T), len: int, cap: int, allocator := context.allocator) {
-	array_init(&q.data, len, cap, allocator)
-	q.len = len
-	q.offset = 0
-}
-
-queue_init :: proc{queue_init_none, queue_init_len, queue_init_len_cap}
-
-queue_delete :: proc(q: $Q/Queue($T)) {
-	array_delete(q.data)
-}
-
-queue_clear :: proc(q: ^$Q/Queue($T)) {
-	q.len = 0
-}
-
-queue_len :: proc(q: $Q/Queue($T)) -> int {
-	return q.len
-}
-
-queue_cap :: proc(q: $Q/Queue($T)) -> int {
-	return array_cap(q.data)
-}
-
-queue_space :: proc(q: $Q/Queue($T)) -> int {
-	return array_len(q.data) - q.len
-}
-
-queue_get :: proc(q: $Q/Queue($T), index: int) -> T {
-	i := (index + q.offset) % array_len(q.data)
-	data := array_slice(q.data)
-	return data[i]
-}
-
-queue_set :: proc(q: ^$Q/Queue($T), index: int, item: T)  {
-	i := (index + q.offset) % array_len(q.data)
-	data := array_slice(q.data)
-	data[i] = item
-}
-
-
-queue_reserve :: proc(q: ^$Q/Queue($T), capacity: int) {
-	if capacity > q.len {
-		_queue_increase_capacity(q, capacity)
-	}
-}
-
-queue_resize :: proc(q: ^$Q/Queue($T), length: int) {
-	if length > q.len {
-		_queue_increase_capacity(q, length)
-	}
-	q.len = length
-}
-
-queue_push_back :: proc(q: ^$Q/Queue($T), item: T) {
-	if queue_space(q^) == 0 {
-		_queue_grow(q)
-	}
-
-	queue_set(q, q.len, item)
-	q.len += 1
-}
-
-queue_push_front :: proc(q: ^$Q/Queue($T), item: T) {
-	if queue_space(q^) == 0 {
-		_queue_grow(q)
-	}
-
-	q.offset = (q.offset - 1 + array_len(q.data)) % array_len(q.data)
-	q.len += 1
-	queue_set(q, 0, item)
-}
-
-queue_pop_front :: proc(q: ^$Q/Queue($T)) -> T {
-	assert(q.len > 0)
-	item := queue_get(q^, 0)
-	q.offset = (q.offset + 1) % array_len(q.data)
-	q.len -= 1
-	if q.len == 0 {
-		q.offset = 0
-	}
-	return item
-}
-
-queue_pop_back :: proc(q: ^$Q/Queue($T)) -> T {
-	assert(q.len > 0)
-	item := queue_get(q^, q.len-1)
-	q.len -= 1
-	return item
-}
-
-queue_consume :: proc(q: ^$Q/Queue($T), count: int) {
-	q.offset = (q.offset + count) & array_len(q.data)
-	q.len -= count
-}
-
-
-queue_push_elems :: proc(q: ^$Q/Queue($T), items: ..T) {
-	if queue_space(q^) < len(items) {
-		_queue_grow(q, q.len + len(items))
-	}
-	size := array_len(q.data)
-	insert := (q.offset + q.len) % size
-
-	to_insert := len(items)
-	if insert + to_insert > size {
-		to_insert = size - insert
-	}
-
-	the_items := items[:]
-
-	data := array_slice(q.data)
-
-	q.len += copy(data[insert:][:to_insert], the_items)
-	the_items = the_items[to_insert:]
-	q.len += copy(data[:], the_items)
-}
-
-queue_push :: proc{queue_push_back, queue_push_elems}
-
-
-
-_queue_increase_capacity :: proc(q: ^$Q/Queue($T), new_capacity: int) {
-	end := array_len(q.data)
-	array_resize(&q.data, new_capacity)
-	if q.offset + q.len > end {
-		end_items := q.len + end
-		data := array_slice(q.data)
-		copy(data[new_capacity-end_items:][:end_items], data[q.offset:][:end_items])
-		q.offset += new_capacity - end
-	}
-}
-_queue_grow :: proc(q: ^$Q/Queue($T), min_capacity: int = 0) {
-	new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
-	_queue_increase_capacity(q, new_capacity)
-}

+ 209 - 0
core/container/queue/queue.odin

@@ -0,0 +1,209 @@
+package container_queue
+
+import "core:builtin"
+import "core:runtime"
+_ :: runtime
+
+// Dynamically resizable double-ended queue/ring-buffer
+Queue :: struct($T: typeid) {
+	data:   [dynamic]T,
+	len:    uint,
+	offset: uint,
+}
+
+DEFAULT_CAPACITY :: 16
+
+// Procedure to initialize a queue
+init :: proc(q: ^$Q/Queue($T), capacity := DEFAULT_CAPACITY, allocator := context.allocator) -> bool {
+	if q.data.allocator.procedure == nil {
+		q.data.allocator = allocator
+	}
+	clear(q)
+	return reserve(q, capacity)
+}
+
+// Procedure to initialize a queue from a fixed backing slice
+init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
+	clear(q)
+	q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
+		data = raw_data(backing),
+		len = builtin.len(backing),
+		cap = builtin.len(backing),
+		allocator = {procedure=runtime.nil_allocator_proc, data=nil},
+	}
+	return true
+}
+
+// Procedure to destroy a queue
+destroy :: proc(q: ^$Q/Queue($T)) {
+	delete(q.data)
+}
+
+// The length of the queue
+len :: proc(q: $Q/Queue($T)) -> int {
+	return int(q.len)
+}
+
+// The current capacity of the queue
+cap :: proc(q: $Q/Queue($T)) -> int {
+	return builtin.len(q.data)
+}
+
+// Remaining space in the queue (cap-len)
+space :: proc(q: $Q/Queue($T)) -> int {
+	return builtin.len(q.data) - int(q.len)
+}
+
+// Reserve enough space for at least the specified capacity
+reserve :: proc(q: ^$Q/Queue($T), capacity: int) -> bool {
+	if uint(capacity) > q.len {
+		return _grow(q, uint(capacity)) 
+	}
+	return true
+}
+
+
+get :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> T {
+	runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	return q.data[idx]
+}
+set :: proc(q: ^$Q/Queue($T), #any_int i: int, val: T, loc := #caller_location) {
+	runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+	
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	q.data[idx] = val
+}
+get_ptr :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> ^T {
+	runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+	
+	idx := (uint(i)+q.offset)%builtin.len(q.data)
+	return &q.data[idx]
+}
+
+// Push an element to the back of the queue
+push_back :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+	if space(q^) == 0 {
+		_grow(q) or_return
+	}
+	idx := (q.offset+uint(q.len))%builtin.len(q.data)
+	q.data[idx] = elem
+	q.len += 1
+	return true
+}
+
+// Push an element to the front of the queue
+push_front :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+	if space(q^) == 0 {
+		_grow(q) or_return
+	}	
+	q.offset = uint(q.offset - 1 + builtin.len(q.data)) % builtin.len(q.data)
+	q.len += 1
+	q.data[q.offset] = elem
+	return true
+}
+
+
+// Pop an element from the back of the queue
+pop_back :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+	assert(condition=q.len > 0, loc=loc)
+	q.len -= 1
+	idx := (q.offset+uint(q.len))%builtin.len(q.data)
+	elem = q.data[idx]
+	return
+}
+// Safely pop an element from the back of the queue
+pop_back_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+	if q.len > 0 {
+		q.len -= 1
+		idx := (q.offset+uint(q.len))%builtin.len(q.data)
+		elem = q.data[idx]
+		ok = true
+	}
+	return
+}
+
+// Pop an element from the front of the queue
+pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+	assert(condition=q.len > 0, loc=loc)
+	elem = q.data[q.offset]
+	q.offset = (q.offset+1)%builtin.len(q.data)
+	q.len -= 1
+	return
+}
+// Safely pop an element from the front of the queue
+pop_front_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+	if q.len > 0 {
+		elem = q.data[q.offset]
+		q.offset = (q.offset+1)%builtin.len(q.data)
+		q.len -= 1
+		ok = true
+	}
+	return
+}
+
+// Push multiple elements to the back of the queue
+push_back_elems :: proc(q: ^$Q/Queue($T), elems: ..T) -> bool {
+	n := uint(builtin.len(elems))
+	if space(q^) < int(n) {
+		_grow(q, q.len + n) or_return
+	}
+	
+	sz := uint(builtin.len(q.data))
+	insert_from := (q.offset + q.len) % sz
+	insert_to := n
+	if insert_from + insert_to > sz {
+		insert_to = sz - insert_from
+	}
+	copy(q.data[insert_from:], elems[:insert_to])
+	copy(q.data[:insert_from], elems[insert_to:])
+	q.len += n
+	return true
+}
+
+// Consume `n` elements from the front of the queue
+consume_front :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+	assert(condition=int(q.len) >= n, loc=loc)
+	if n > 0 {
+		nu := uint(n)
+		q.offset = (q.offset + nu) % builtin.len(q.data)
+		q.len -= nu	
+	}
+}
+
+// Consume `n` elements from the back of the queue
+consume_back :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+	assert(condition=int(q.len) >= n, loc=loc)
+	if n > 0 {
+		q.len -= uint(n)
+	}
+}
+
+
+
+append_elem  :: push_back
+append_elems :: push_back_elems
+push   :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
+
+
+// Clear the contents of the queue
+clear :: proc(q: ^$Q/Queue($T)) {
+	q.len = 0
+	q.offset = 0
+}
+
+
+// Internal growing procedure
+_grow :: proc(q: ^$Q/Queue($T), min_capacity: uint = 0) -> bool {
+	new_capacity := max(min_capacity, uint(8), uint(builtin.len(q.data))*2)
+	n := uint(builtin.len(q.data))
+	builtin.resize(&q.data, int(new_capacity)) or_return
+	if q.offset + q.len > n {
+		diff := n - q.offset
+		copy(q.data[new_capacity-diff:], q.data[q.offset:][:diff])
+		q.offset += new_capacity - n
+	}
+	return true
+}
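
Editorial note (not part of this changeset): a minimal usage sketch for the new double-ended queue, assuming it is imported as `queue` from "core:container/queue"; the `package test` and `main` names are illustrative only.

	package test

	import "core:fmt"
	import "core:container/queue"

	main :: proc() {
		q: queue.Queue(int)
		queue.init(&q)
		defer queue.destroy(&q)

		queue.push_back(&q, 2)
		queue.push_back(&q, 3)
		queue.push_front(&q, 1)

		fmt.println(queue.len(q))        // 3
		fmt.println(queue.pop_front(&q)) // 1, elements can leave from either end
		fmt.println(queue.pop_back(&q))  // 3
	}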

+ 0 - 74
core/container/ring.odin

@@ -1,74 +0,0 @@
-package container
-
-
-Ring :: struct($T: typeid) {
-	next, prev: ^Ring(T),
-	value: T,
-}
-
-ring_init :: proc(r: ^$R/Ring) -> ^R {
-	r.prev, r.next = r, r
-	return r
-}
-
-ring_next :: proc(r: ^$R/Ring) -> ^R {
-	if r.next == nil {
-		return ring_init(r)
-	}
-	return r.next
-}
-ring_prev :: proc(r: ^$R/Ring) -> ^R {
-	if r.prev == nil {
-		return ring_init(r)
-	}
-	return r.prev
-}
-
-
-ring_move :: proc(r: ^$R/Ring, n: int) -> ^R {
-  r := r
-	if r.next == nil {
-		return ring_init(r)
-	}
-
-	switch {
-	case n < 0:
-		for _ in n..<0 {
-			r = r.prev
-		}
-	case n > 0:
-		for _ in 0..<n {
-			r = r.next
-		}
-	}
-	return r
-}
-
-ring_link :: proc(r, s: ^$R/Ring) -> ^R {
-	n := ring_next(r)
-	if s != nil {
-		p := ring_prev(s)
-		r.next = s
-		s.prev = r
-		n.prev = p
-		p.next = n
-	}
-	return n
-}
-ring_unlink :: proc(r: ^$R/Ring, n: int) -> ^R {
-	if n <= 0 {
-		return nil
-	}
-	return ring_link(r, ring_move(r, n+1))
-}
-ring_len :: proc(r: ^$R/Ring) -> int {
-	n := 0
-	if r != nil {
-		n = 1
-		for p := ring_next(r); p != r; p = p.next {
-			n += 1
-		}
-	}
-	return n
-}
-

+ 0 - 240
core/container/set.odin

@@ -1,240 +0,0 @@
-package container
-
-Set :: struct {
-	hash:    Array(int),
-	entries: Array(Set_Entry),
-}
-
-Set_Entry :: struct {
-	key:   u64,
-	next:  int,
-}
-
-
-/*
-set_init :: proc{
-	set_init_none,
-	set_init_cap,
-}
-set_delete
-
-set_in
-set_not_in
-set_add
-set_remove
-set_reserve
-set_clear
-*/
-
-set_init :: proc{set_init_none, set_init_cap}
-
-set_init_none :: proc(m: ^Set, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-}
-
-set_init_cap :: proc(m: ^Set, cap: int, allocator := context.allocator) {
-	m.hash.allocator = allocator
-	m.entries.allocator = allocator
-	set_reserve(m, cap)
-}
-
-set_delete :: proc(m: Set) {
-	array_delete(m.hash)
-	array_delete(m.entries)
-}
-
-
-set_in :: proc(m: Set, key: u64) -> bool {
-	return _set_find_or_fail(m, key) >= 0
-}
-set_not_in :: proc(m: Set, key: u64) -> bool {
-	return _set_find_or_fail(m, key) < 0
-}
-
-set_add :: proc(m: ^Set, key: u64) {
-	if array_len(m.hash) == 0 {
-		_set_grow(m)
-	}
-
-	_ = _set_find_or_make(m, key)
-	if _set_full(m^) {
-		_set_grow(m)
-	}
-}
-
-set_remove :: proc(m: ^Set, key: u64) {
-	fr := _set_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		_set_erase(m, fr)
-	}
-}
-
-
-set_reserve :: proc(m: ^Set, new_size: int) {
-	nm: Set
-	set_init(&nm, m.hash.allocator)
-	array_resize(&nm.hash, new_size)
-	array_reserve(&nm.entries, array_len(m.entries))
-
-	for i in 0..<new_size {
-		array_set(&nm.hash, i, -1)
-	}
-	for i in 0..<array_len(m.entries) {
-		e := array_get(m.entries, i)
-		set_add(&nm, e.key)
-	}
-
-	set_delete(m^)
-	m^ = nm
-}
-
-set_clear :: proc(m: ^Set) {
-	array_clear(&m.hash)
-	array_clear(&m.entries)
-}
-
-
-set_equal :: proc(a, b: Set) -> bool {
-	a_entries := array_slice(a.entries)
-	b_entries := array_slice(b.entries)
-	if len(a_entries) != len(b_entries) {
-		return false
-	}
-	for e in a_entries {
-		if set_not_in(b, e.key) {
-			return false
-		}
-	}
-
-	return true
-}
-
-
-
-/// Internal
-
-_set_add_entry :: proc(m: ^Set, key: u64) -> int {
-	e: Set_Entry
-	e.key = key
-	e.next = -1
-	idx := array_len(m.entries)
-	array_push(&m.entries, e)
-	return idx
-}
-
-_set_erase :: proc(m: ^Set, fr: Map_Find_Result) {
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
-	}
-
-	if fr.entry_index == array_len(m.entries)-1 {
-		array_pop_back(&m.entries)
-		return
-	}
-
-	array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
-	last := _set_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
-	if last.entry_prev < 0 {
-		array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
-	} else {
-		array_set(&m.hash, last.hash_index, fr.entry_index)
-	}
-}
-
-
-_set_find_key :: proc(m: Set, key: u64) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(key % u64(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it.key == key {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_set_find_entry :: proc(m: ^Set, e: ^Set_Entry) -> Map_Find_Result {
-	fr: Map_Find_Result
-	fr.hash_index = -1
-	fr.entry_prev = -1
-	fr.entry_index = -1
-
-	if array_len(m.hash) == 0 {
-		return fr
-	}
-
-	fr.hash_index = int(e.key % u64(array_len(m.hash)))
-	fr.entry_index = array_get(m.hash, fr.hash_index)
-	for fr.entry_index >= 0 {
-		it := array_get_ptr(m.entries, fr.entry_index)
-		if it == e {
-			return fr
-		}
-		fr.entry_prev = fr.entry_index
-		fr.entry_index = it.next
-	}
-	return fr
-}
-
-_set_find_or_fail :: proc(m: Set, key: u64) -> int {
-	return _set_find_key(m, key).entry_index
-}
-_set_find_or_make :: proc(m: ^Set, key: u64) -> int {
-	fr := _set_find_key(m^, key)
-	if fr.entry_index >= 0 {
-		return fr.entry_index
-	}
-
-	i := _set_add_entry(m, key)
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-	return i
-}
-
-
-_set_make :: proc(m: ^Set, key: u64) -> int {
-	fr := _set_find_key(m^, key)
-	i := _set_add_entry(m, key)
-
-	if fr.entry_prev < 0 {
-		array_set(&m.hash, fr.hash_index, i)
-	} else {
-		array_get_ptr(m.entries, fr.entry_prev).next = i
-	}
-
-	array_get_ptr(m.entries, i).next = fr.entry_index
-
-	return i
-}
-
-
-_set_full :: proc(m: Set) -> bool {
-	// TODO(bill): Determine good max load factor
-	return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_set_grow :: proc(m: ^Set) {
-	new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
-	set_reserve(m, new_size)
-}
-
-

+ 0 - 95
core/container/small_array.odin

@@ -1,95 +0,0 @@
-package container
-
-Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
-	data: [N]T,
-	len:  int,
-}
-
-
-small_array_len :: proc(a: $A/Small_Array) -> int {
-	return a.len
-}
-
-small_array_cap :: proc(a: $A/Small_Array) -> int {
-	return len(a.data)
-}
-
-small_array_space :: proc(a: $A/Small_Array) -> int {
-	return len(a.data) - a.len
-}
-
-small_array_slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
-	return a.data[:a.len]
-}
-
-
-small_array_get :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> T {
-	return a.data[index]
-}
-small_array_get_ptr :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> ^T {
-	return &a.data[index]
-}
-
-small_array_set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T, loc := #caller_location) {
-	a.data[index] = item
-}
-
-small_array_resize :: proc(a: ^$A/Small_Array, length: int) {
-	a.len = min(length, len(a.data))
-}
-
-
-small_array_push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
-	if a.len < len(a.data) {
-		a.len += 1
-		a.data[a.len-1] = item
-		return true
-	}
-	return false
-}
-
-small_array_push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
-	if a.len < len(a.data) {
-		a.len += 1
-		data := small_array_slice(a)
-		copy(data[1:], data[:])
-		data[0] = item
-		return true
-	}
-	return false
-}
-
-small_array_pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := a.data[a.len-1]
-	a.len -= 1
-	return item
-}
-
-small_array_pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
-	assert(condition=a.len > 0, loc=loc)
-	item := a.data[0]
-	s := small_array_slice(a)
-	copy(s[:], s[1:])
-	a.len -= 1
-	return item
-}
-
-
-small_array_consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
-	assert(condition=a.len >= count, loc=loc)
-	a.len -= count
-}
-
-small_array_clear :: proc(a: ^$A/Small_Array($N, $T)) {
-	small_array_resize(a, 0)
-}
-
-small_array_push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
-	n := copy(a.data[a.len:], items[:])
-	a.len += n
-}
-
-small_array_push   :: proc{small_array_push_back, small_array_push_back_elems}
-small_array_append :: proc{small_array_push_back, small_array_push_back_elems}
-

+ 117 - 0
core/container/small_array/small_array.odin

@@ -0,0 +1,117 @@
+package container_small_array
+
+import "core:builtin"
+
+Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
+	data: [N]T,
+	len:  int,
+}
+
+
+len :: proc(a: $A/Small_Array) -> int {
+	return a.len
+}
+
+cap :: proc(a: $A/Small_Array) -> int {
+	return builtin.len(a.data)
+}
+
+space :: proc(a: $A/Small_Array) -> int {
+	return builtin.len(a.data) - a.len
+}
+
+slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
+	return a.data[:a.len]
+}
+
+
+get :: proc(a: $A/Small_Array($N, $T), index: int) -> T {
+	return a.data[index]
+}
+get_ptr :: proc(a: ^$A/Small_Array($N, $T), index: int) -> ^T {
+	return &a.data[index]
+}
+
+set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T) {
+	a.data[index] = item
+}
+
+resize :: proc(a: ^$A/Small_Array, length: int) {
+	a.len = min(length, builtin.len(a.data))
+}
+
+
+push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+	if a.len < cap(a^) {
+		a.data[a.len] = item
+		a.len += 1
+		return true
+	}
+	return false
+}
+
+push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+	if a.len < cap(a^) {
+		a.len += 1
+		data := slice(a)
+		copy(data[1:], data[:])
+		data[0] = item
+		return true
+	}
+	return false
+}
+
+pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+	assert(condition=(N > 0 && a.len > 0), loc=loc)
+	item := a.data[a.len-1]
+	a.len -= 1
+	return item
+}
+
+pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+	assert(condition=(N > 0 && a.len > 0), loc=loc)
+	item := a.data[0]
+	s := slice(a)
+	copy(s[:], s[1:])
+	a.len -= 1
+	return item
+}
+
+pop_back_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+	if N > 0 && a.len > 0 {
+		item = a.data[a.len-1]
+		a.len -= 1
+		ok = true
+	}
+	return
+}
+
+pop_front_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+	if N > 0 && a.len > 0 {
+		item = a.data[0]
+		s := slice(a)
+		copy(s[:], s[1:])
+		a.len -= 1
+		ok = true
+	} 
+	return
+}
+
+consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
+	assert(condition=a.len >= count, loc=loc)
+	a.len -= count
+}
+
+clear :: proc(a: ^$A/Small_Array($N, $T)) {
+	resize(a, 0)
+}
+
+push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
+	n := copy(a.data[a.len:], items[:])
+	a.len += n
+}
+
+append_elem  :: push_back
+append_elems :: push_back_elems
+push   :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}

+ 98 - 0
core/container/topological_sort/topological_sort.odin

@@ -0,0 +1,98 @@
+// The following is a generic O(V+E) topological sorter implementation.
+// This is the fastest known method for topological sorting and Odin's
+// map type is being used to accelerate lookups.
+package container_topological_sort
+
+import "core:intrinsics"
+import "core:runtime"
+_ :: intrinsics
+_ :: runtime
+
+
+Relations :: struct($K: typeid) where intrinsics.type_is_valid_map_key(K) {
+	dependents:   map[K]bool,
+	dependencies: int,
+}
+
+Sorter :: struct(K: typeid) where intrinsics.type_is_valid_map_key(K)  {
+	relations: map[K]Relations(K),
+	dependents_allocator: runtime.Allocator,
+}
+
+@(private="file")
+make_relations :: proc(sorter: ^$S/Sorter($K)) -> (r: Relations(K)) {
+	r.dependents.allocator = sorter.dependents_allocator
+	return
+}
+
+
+init :: proc(sorter: ^$S/Sorter($K)) {
+	sorter.relations = make(map[K]Relations(K))
+	sorter.dependents_allocator = context.allocator
+}
+
+destroy :: proc(sorter: ^$S/Sorter($K)) {
+	for _, v in &sorter.relations {
+		delete(v.dependents)
+	}
+	delete(sorter.relations)
+}
+
+add_key :: proc(sorter: ^$S/Sorter($K), key: K) -> bool {
+	if key in sorter.relations {
+		return false
+	}
+	sorter.relations[key] = make_relations(sorter)
+	return true
+}
+
+add_dependency :: proc(sorter: ^$S/Sorter($K), key, dependency: K) -> bool {
+	if key == dependency {
+		return false
+	}
+
+	find := &sorter.relations[dependency]
+	if find == nil {
+		find = map_insert(&sorter.relations, dependency, make_relations(sorter))
+	}
+
+	if find.dependents[key] {
+		return true
+	}
+	find.dependents[key] = true
+
+ 	find = &sorter.relations[key]
+	if find == nil {
+		find = map_insert(&sorter.relations, key, make_relations(sorter))
+	}
+
+	find.dependencies += 1
+
+	return true
+}
+
+sort :: proc(sorter: ^$S/Sorter($K)) -> (sorted, cycled: [dynamic]K) {
+	relations := &sorter.relations
+
+	for k, v in relations {
+		if v.dependencies == 0 {
+			append(&sorted, k)
+		}
+	}
+
+	for root in &sorted do for k, _ in relations[root].dependents {
+		relation := &relations[k]
+		relation.dependencies -= 1
+		if relation.dependencies == 0 {
+			append(&sorted, k)
+		}
+	}
+
+	for k, v in relations {
+		if v.dependencies != 0 {
+			append(&cycled, k)
+		}
+	}
+
+	return
+}
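
Editorial note (not part of this changeset): a minimal usage sketch for the new topological sorter, assuming the import alias `ts` for "core:container/topological_sort"; the `package test` and `main` names are illustrative only.

	package test

	import "core:fmt"
	import ts "core:container/topological_sort"

	main :: proc() {
		sorter: ts.Sorter(string)
		ts.init(&sorter)
		defer ts.destroy(&sorter)

		// "app" depends on "library", which in turn depends on "runtime".
		ts.add_dependency(&sorter, "app", "library")
		ts.add_dependency(&sorter, "library", "runtime")

		sorted, cycled := ts.sort(&sorter)
		defer delete(sorted)
		defer delete(cycled)

		fmt.println(sorted) // ["runtime", "library", "app"]
		fmt.println(cycled) // empty, only populated when the graph contains a cycle
	}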

+ 7 - 1
core/crypto/README.md

@@ -32,9 +32,11 @@ Please see the chart below for the options.
 
 
 #### High level API
 Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_<size>`\*.  
-Included in these groups are four procedures.
+Included in these groups are six procedures.
 * `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
 * `hash_bytes` - Hash a given byte slice and return the computed hash
+* `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
+* `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
 * `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
 * `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
 
@@ -59,6 +61,10 @@ main :: proc() {
     // Compute the hash, using the high level API
     computed_hash := md4.hash(input)
 
+    // Variant that takes a destination buffer, instead of returning the computed hash
+    hash := make([]byte, md4.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
+    md4.hash(input, hash[:])
+
     // Compute the hash, using the low level API
     ctx: md4.Md4_Context
     computed_hash_low: [16]byte

+ 1 - 1
core/crypto/_fiat/field_poly1305/field.odin

@@ -22,7 +22,7 @@ fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, a
 
 
 	assert(len(arg1) == 16)
 
-	when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+	when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
 		// While it may be unwise to do deserialization here on our
 		// own when fiat-crypto provides equivalent functionality,
 		// doing it this way provides a little under 3x performance

+ 2 - 2
core/crypto/_sha3/_sha3.odin

@@ -52,7 +52,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
     t: u64       = ---
     bc: [5]u64   = ---
 
-    when ODIN_ENDIAN != "little" {
+    when ODIN_ENDIAN != .Little {
         v: uintptr = ---
         for i = 0; i < 25; i += 1 {
             v := uintptr(&st[i])
@@ -98,7 +98,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
         st[0] ~= keccakf_rndc[r]
     }
 
-    when ODIN_ENDIAN != "little" {
+    when ODIN_ENDIAN != .Little {
         for i = 0; i < 25; i += 1 {
             v = uintptr(&st[i])
             t = st[i]

+ 117 - 28
core/crypto/blake/blake.odin

@@ -17,16 +17,21 @@ import "core:io"
     High level API
 */
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // computed hash
-hash_string_224 :: proc "contextless" (data: string) -> [28]byte {
+hash_string_224 :: proc "contextless" (data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
 }
 
 // hash_bytes_224 will hash the given input and return the
 // computed hash
-hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Blake256_Context
     ctx: Blake256_Context
     ctx.is224 = true
     ctx.is224 = true
     init(&ctx)
     init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake256_Context
+    ctx.is224 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Blake256_Context
     ctx: Blake256_Context
     ctx.is224 = true
     ctx.is224 = true
     init(&ctx)
     init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc "contextless" (data: string) -> [32]byte {
+hash_string_256 :: proc "contextless" (data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Blake256_Context
     ctx: Blake256_Context
     ctx.is224 = false
     ctx.is224 = false
     init(&ctx)
     init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake256_Context
+    ctx.is224 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Blake256_Context
     ctx: Blake256_Context
     ctx.is224 = false
     ctx.is224 = false
     init(&ctx)
     init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc "contextless" (data: string) -> [48]byte {
+hash_string_384 :: proc "contextless" (data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Blake512_Context
     ctx: Blake512_Context
     ctx.is384 = true
     ctx.is384 = true
     init(&ctx)
     init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake512_Context
+    ctx.is384 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Blake512_Context
     ctx: Blake512_Context
     ctx.is384 = true
     ctx.is384 = true
     init(&ctx)
     init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc "contextless" (data: string) -> [64]byte {
+hash_string_512 :: proc "contextless" (data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Blake512_Context
     ctx: Blake512_Context
     ctx.is384 = false
     ctx.is384 = false
     init(&ctx)
     init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Blake512_Context
+    ctx.is384 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Blake512_Context
     ctx: Blake512_Context
     ctx.is384 = false
     ctx.is384 = false
     init(&ctx)
     init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

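The blake package above now exposes DIGEST_SIZE_* constants and *_to_buffer variants alongside the array-returning procs, all reachable through the hash_224/256/384/512 proc groups. A brief sketch of the 256-bit variant (the message string is illustrative; the proc and constant names come from the diff):

```odin
package blake_example

import "core:crypto/blake"
import "core:fmt"

main :: proc() {
	msg := "hello"

	// Destination sized with the new constant; anything smaller trips the
	// assert inside hash_bytes_to_buffer_256.
	digest: [blake.DIGEST_SIZE_256]byte
	blake.hash_string_to_buffer_256(msg, digest[:])

	// The proc group now dispatches to the buffer variants as well.
	blake.hash_256(msg, digest[:])

	fmt.println(digest)
}
```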
+ 33 - 7
core/crypto/blake2b/blake2b.odin

@@ -20,16 +20,18 @@ import "../_blake2"
     High level API
 */
 
+DIGEST_SIZE :: 64
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }
 
 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: _blake2.Blake2b_Context
     cfg: _blake2.Blake2_Config
     cfg.size = _blake2.BLAKE2B_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
     return hash
 }
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: _blake2.Blake2b_Context
+    cfg: _blake2.Blake2_Config
+    cfg.size = _blake2.BLAKE2B_SIZE
+    ctx.cfg  = cfg
+    _blake2.init(&ctx)
+    _blake2.update(&ctx, data)
+    _blake2.final(&ctx, hash)
+}
+
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: _blake2.Blake2b_Context
     cfg: _blake2.Blake2_Config
     cfg.size = _blake2.BLAKE2B_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 
 hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 
 /*

+ 33 - 7
core/crypto/blake2s/blake2s.odin

@@ -20,16 +20,18 @@ import "../_blake2"
     High level API
 */
 
+DIGEST_SIZE :: 32
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }
 
 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: _blake2.Blake2s_Context
     cfg: _blake2.Blake2_Config
     cfg.size = _blake2.BLAKE2S_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
     return hash
 }
 
+
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: _blake2.Blake2s_Context
+    cfg: _blake2.Blake2_Config
+    cfg.size = _blake2.BLAKE2S_SIZE
+    ctx.cfg  = cfg
+    _blake2.init(&ctx)
+    _blake2.update(&ctx, data)
+    _blake2.final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: _blake2.Blake2s_Context
     cfg: _blake2.Blake2_Config
     cfg.size = _blake2.BLAKE2S_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 
 hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 
 /*

+ 1 - 1
core/crypto/chacha20/chacha20.odin

@@ -346,7 +346,7 @@ _do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
 		// Until dedicated assembly can be written leverage the fact that
 		// the callers of this routine ensure that src/dst are valid.
 
-		when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+		when ODIN_ARCH == .i386 || ODIN_ARCH == .amd64 {
			// util.PUT_U32_LE/util.U32_LE are not required on little-endian
			// systems that also happen to not be strict about aligned
			// memory access.

+ 29 - 7
core/crypto/gost/gost.odin

@@ -18,16 +18,18 @@ import "core:io"
     High level API
 */
 
+DIGEST_SIZE :: 32
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }
 
 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Gost_Context
     init(&ctx)
     update(&ctx, data)
@@ -35,10 +37,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
     return hash
 }
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Gost_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Gost_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -56,7 +76,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -64,7 +84,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 
 hash :: proc {
@@ -72,6 +92,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 
 /*

+ 117 - 28
core/crypto/groestl/groestl.odin

@@ -17,16 +17,21 @@ import "core:io"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // hash_string_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
     return hash_bytes_224(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_224 will hash the given input and return the
 // hash_bytes_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 224
     ctx.hashbitlen = 224
     init(&ctx)
     init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 224
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 224
     ctx.hashbitlen = 224
     init(&ctx)
     init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 256
     ctx.hashbitlen = 256
     init(&ctx)
     init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 256
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 256
     ctx.hashbitlen = 256
     init(&ctx)
     init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 384
     ctx.hashbitlen = 384
     init(&ctx)
     init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 384
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 384
     ctx.hashbitlen = 384
     init(&ctx)
     init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 512
     ctx.hashbitlen = 512
     init(&ctx)
     init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Groestl_Context
+    ctx.hashbitlen = 512
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Groestl_Context
     ctx: Groestl_Context
     ctx.hashbitlen = 512
     ctx.hashbitlen = 512
     init(&ctx)
     init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

File diff suppressed because it is too large
+ 400 - 95
core/crypto/haval/haval.odin


+ 117 - 28
core/crypto/jh/jh.odin

@@ -17,16 +17,21 @@ import "core:io"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // hash_string_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
     return hash_bytes_224(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_224 will hash the given input and return the
 // hash_bytes_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 224
     ctx.hashbitlen = 224
     init(&ctx)
     init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 224
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 224
     ctx.hashbitlen = 224
     init(&ctx)
     init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 256
     ctx.hashbitlen = 256
     init(&ctx)
     init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 256
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 256
     ctx.hashbitlen = 256
     init(&ctx)
     init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 384
     ctx.hashbitlen = 384
     init(&ctx)
     init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 384
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 384
     ctx.hashbitlen = 384
     init(&ctx)
     init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 512
     ctx.hashbitlen = 512
     init(&ctx)
     init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Jh_Context
+    ctx.hashbitlen = 512
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Jh_Context
     ctx: Jh_Context
     ctx.hashbitlen = 512
     ctx.hashbitlen = 512
     init(&ctx)
     init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

+ 130 - 37
core/crypto/keccak/keccak.odin

@@ -21,18 +21,23 @@ import "../_sha3"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // hash_string_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
     return hash_bytes_224(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_224 will hash the given input and return the
 // hash_bytes_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
@@ -40,12 +45,32 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_224
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -63,7 +88,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -71,7 +96,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -79,20 +104,22 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
@@ -100,12 +127,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -123,7 +170,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -131,7 +178,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -139,20 +186,22 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
@@ -160,12 +209,32 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_384
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -183,7 +252,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -191,7 +260,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -199,20 +268,22 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
@@ -220,12 +291,32 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_512
+    ctx.is_keccak = true
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     ctx.is_keccak = true
     ctx.is_keccak = true
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -243,7 +334,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -251,7 +342,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -259,13 +350,15 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*
     Low level API
     Low level API
 */
 */
 
 
-Sha3_Context :: _sha3.Sha3_Context
+Keccak_Context :: _sha3.Sha3_Context
 
 
 init :: proc(ctx: ^_sha3.Sha3_Context) {
 init :: proc(ctx: ^_sha3.Sha3_Context) {
     ctx.is_keccak = true
     ctx.is_keccak = true
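The keccak file above defines both the DIGEST_SIZE_* constants and the new buffer-writing procedures, but no call site appears in the diff. A short illustrative sketch, assuming the usual core:crypto/keccak import path for this file:

package example

import "core:crypto/keccak"

main :: proc() {
	// DIGEST_SIZE_256 is 32; a smaller destination slice would trip the
	// assert added in hash_bytes_to_buffer_256.
	digest: [keccak.DIGEST_SIZE_256]byte
	keccak.hash_string_to_buffer_256("hello", digest[:])
}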

+ 43 - 21
core/crypto/md2/md2.odin

@@ -17,16 +17,18 @@ import "core:io"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // hash_string will hash the given input and return the
 // computed hash
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
     return hash_bytes(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes will hash the given input and return the
 // hash_bytes will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-	hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+	hash: [DIGEST_SIZE]byte
 	ctx: Md2_Context
 	ctx: Md2_Context
     // init(&ctx) No-op
     // init(&ctx) No-op
     update(&ctx, data)
     update(&ctx, data)
@@ -34,10 +36,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+	hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+	assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md2_Context
+    // init(&ctx) No-op
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-	hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+	hash: [DIGEST_SIZE]byte
 	ctx: Md2_Context
 	ctx: Md2_Context
 	// init(&ctx) No-op
 	// init(&ctx) No-op
 	buf := make([]byte, 512)
 	buf := make([]byte, 512)
@@ -55,7 +75,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // hash_file will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
 	if !load_at_once {
 	if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
         return hash_stream(os.stream_from_handle(hd))
     } else {
     } else {
@@ -63,7 +83,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
             return hash_bytes(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 }
 
 
 hash :: proc {
 hash :: proc {
@@ -71,6 +91,8 @@ hash :: proc {
     hash_file,
     hash_file,
     hash_bytes,
     hash_bytes,
     hash_string,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 }
 
 
 /*
 /*
@@ -86,7 +108,7 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
 	for i := 0; i < len(data); i += 1 {
 	for i := 0; i < len(data); i += 1 {
 		ctx.data[ctx.datalen] = data[i]
 		ctx.data[ctx.datalen] = data[i]
 		ctx.datalen += 1
 		ctx.datalen += 1
-		if (ctx.datalen == 16) {
+		if (ctx.datalen == DIGEST_SIZE) {
 			transform(ctx, ctx.data[:])
 			transform(ctx, ctx.data[:])
 			ctx.datalen = 0
 			ctx.datalen = 0
 		}
 		}
@@ -94,14 +116,14 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
 }
 }
 
 
 final :: proc(ctx: ^Md2_Context, hash: []byte) {
 final :: proc(ctx: ^Md2_Context, hash: []byte) {
-	to_pad := byte(16 - ctx.datalen)
-    for ctx.datalen < 16 {
+	to_pad := byte(DIGEST_SIZE - ctx.datalen)
+    for ctx.datalen < DIGEST_SIZE {
         ctx.data[ctx.datalen] = to_pad
         ctx.data[ctx.datalen] = to_pad
 		ctx.datalen += 1
 		ctx.datalen += 1
     }
     }
 	transform(ctx, ctx.data[:])
 	transform(ctx, ctx.data[:])
 	transform(ctx, ctx.checksum[:])
 	transform(ctx, ctx.checksum[:])
-    for i := 0; i < 16; i += 1 {
+    for i := 0; i < DIGEST_SIZE; i += 1 {
         hash[i] = ctx.state[i]
         hash[i] = ctx.state[i]
     }
     }
 }
 }
@@ -111,9 +133,9 @@ final :: proc(ctx: ^Md2_Context, hash: []byte) {
 */
 */
 
 
 Md2_Context :: struct {
 Md2_Context :: struct {
-    data:     [16]byte,
-    state:    [16 * 3]byte,
-    checksum: [16]byte,
+    data:     [DIGEST_SIZE]byte,
+    state:    [DIGEST_SIZE * 3]byte,
+    checksum: [DIGEST_SIZE]byte,
     datalen:  int,
     datalen:  int,
 }
 }
 
 
@@ -140,20 +162,20 @@ PI_TABLE := [?]byte {
 
 
 transform :: proc(ctx: ^Md2_Context, data: []byte) {
 transform :: proc(ctx: ^Md2_Context, data: []byte) {
     j,k,t: byte
     j,k,t: byte
-	for j = 0; j < 16; j += 1 {
-		ctx.state[j + 16] = data[j]
-		ctx.state[j + 16 * 2] = (ctx.state[j + 16] ~ ctx.state[j])
+	for j = 0; j < DIGEST_SIZE; j += 1 {
+		ctx.state[j + DIGEST_SIZE] = data[j]
+		ctx.state[j + DIGEST_SIZE * 2] = (ctx.state[j + DIGEST_SIZE] ~ ctx.state[j])
 	}
 	}
 	t = 0
 	t = 0
-	for j = 0; j < 16 + 2; j += 1 {
-		for k = 0; k < 16 * 3; k += 1 {
+	for j = 0; j < DIGEST_SIZE + 2; j += 1 {
+		for k = 0; k < DIGEST_SIZE * 3; k += 1 {
 			ctx.state[k] ~= PI_TABLE[t]
 			ctx.state[k] ~= PI_TABLE[t]
 			t = ctx.state[k]
 			t = ctx.state[k]
 		}
 		}
 		t = (t + j) & 0xff
 		t = (t + j) & 0xff
 	}
 	}
-	t = ctx.checksum[16 - 1]
-	for j = 0; j < 16; j += 1 {
+	t = ctx.checksum[DIGEST_SIZE - 1]
+	for j = 0; j < DIGEST_SIZE; j += 1 {
 		ctx.checksum[j] ~= PI_TABLE[data[j] ~ t]
 		ctx.checksum[j] ~= PI_TABLE[data[j] ~ t]
 		t = ctx.checksum[j]
 		t = ctx.checksum[j]
 	}
 	}
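The md2 comments above describe the buffer-based API without showing a call site. A hedged sketch, assuming the standard core:crypto/md2 import for this file; note that the extended `hash` proc group resolves to the same procedure when called with (string, []byte):

package example

import "core:crypto/md2"

main :: proc() {
	digest: [md2.DIGEST_SIZE]byte // DIGEST_SIZE is 16 for MD2
	md2.hash_string_to_buffer("abc", digest[:])
	// Equivalent call through the overloaded proc group added above:
	md2.hash("abc", digest[:])
}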

+ 31 - 9
core/crypto/md4/md4.odin

@@ -21,16 +21,18 @@ import "../util"
     High level API
 */

+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Md4_Context
     init(&ctx)
     update(&ctx, data)
@@ -38,10 +40,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md4_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Md4_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -59,7 +79,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -67,7 +87,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -75,6 +95,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*
@@ -171,9 +193,9 @@ HH :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {
 
 
 transform :: proc(ctx: ^Md4_Context, data: []byte) {
     a, b, c, d, i, j: u32
-    m: [16]u32
+    m: [DIGEST_SIZE]u32
 
 
-    for i, j = 0, 0; i < 16; i += 1 {
+    for i, j = 0, 0; i < DIGEST_SIZE; i += 1 {
         m[i] = u32(data[j]) | (u32(data[j + 1]) << 8) | (u32(data[j + 2]) << 16) | (u32(data[j + 3]) << 24)
         j += 4
     }
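Besides the one-shot helpers, the low-level context shown in this diff can be driven incrementally. A sketch under the same assumed core:crypto/md4 import path:

package example

import "core:crypto/md4"

main :: proc() {
	// Incremental hashing with the same context type the helpers use.
	msg := "hello world"
	ctx: md4.Md4_Context
	md4.init(&ctx)
	md4.update(&ctx, transmute([]byte)(msg))
	digest: [md4.DIGEST_SIZE]byte
	md4.final(&ctx, digest[:])
}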

+ 31 - 9
core/crypto/md5/md5.odin

@@ -20,16 +20,18 @@ import "../util"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE :: 16
+
 // hash_string will hash the given input and return the
 // hash_string will hash the given input and return the
 // computed hash
 // computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
     return hash_bytes(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes will hash the given input and return the
 // hash_bytes will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Md5_Context
     ctx: Md5_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -37,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Md5_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Md5_Context
     ctx: Md5_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -58,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // hash_file will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
         return hash_stream(os.stream_from_handle(hd))
     } else {
     } else {
@@ -66,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
             return hash_bytes(buf[:]), ok
             return hash_bytes(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 }
 
 
 hash :: proc {
 hash :: proc {
@@ -74,6 +94,8 @@ hash :: proc {
     hash_file,
     hash_file,
     hash_bytes,
     hash_bytes,
     hash_string,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 }
 
 
 /*
 /*
@@ -176,9 +198,9 @@ II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u
 
 
 transform :: proc(ctx: ^Md5_Context, data: []byte) {
 transform :: proc(ctx: ^Md5_Context, data: []byte) {
     i, j: u32
     i, j: u32
-    m: [16]u32
+    m: [DIGEST_SIZE]u32
 
 
-    for i, j = 0, 0; i < 16; i+=1 {
+    for i, j = 0, 0; i < DIGEST_SIZE; i+=1 {
         m[i] = u32(data[j]) + u32(data[j + 1]) << 8 + u32(data[j + 2]) << 16 + u32(data[j + 3]) << 24
         m[i] = u32(data[j]) + u32(data[j + 1]) << 8 + u32(data[j + 2]) << 16 + u32(data[j + 3]) << 24
         j += 4
         j += 4
     }
     }
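For md5 the array-returning helpers and the new buffer-writing helpers produce the same digest; a short illustrative comparison, assuming the standard core:crypto/md5 import:

package example

import "core:crypto/md5"
import "core:fmt"

main :: proc() {
	a := md5.hash_string("abc")        // existing array-returning form
	b: [md5.DIGEST_SIZE]byte
	md5.hash_string_to_buffer("abc", b[:]) // new buffer-writing form
	fmt.println(a == b)                // both forms yield the same 16-byte digest
}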

+ 1 - 1
core/crypto/rand_generic.odin

@@ -1,6 +1,6 @@
 package crypto

-when ODIN_OS != "linux" {
+when ODIN_OS != .Linux && ODIN_OS != .OpenBSD && ODIN_OS != .Windows {
 	_rand_bytes :: proc (dst: []byte) {
 		unimplemented("crypto: rand_bytes not supported on this OS")
 	}
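The only change in rand_generic.odin is that ODIN_OS is now compared against enum members rather than strings, and two platform backends are added below. A small, self-contained sketch of the new style of platform guard (illustrative, not part of the diff):

package example

import "core:fmt"

main :: proc() {
	// ODIN_OS is matched against enum members such as .Linux, .OpenBSD
	// and .Windows instead of strings like "linux".
	when ODIN_OS == .Windows {
		fmt.println("RNG backend: BCryptGenRandom")
	} else when ODIN_OS == .OpenBSD {
		fmt.println("RNG backend: arc4random_buf")
	} else {
		fmt.println("RNG backend: platform specific or unimplemented")
	}
}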

+ 12 - 0
core/crypto/rand_openbsd.odin

@@ -0,0 +1,12 @@
+package crypto
+
+import "core:c"
+
+foreign import libc "system:c"
+foreign libc {
+	arc4random_buf :: proc "c" (buf: rawptr, nbytes: c.size_t) ---
+}
+
+_rand_bytes :: proc (dst: []byte) {
+	arc4random_buf(raw_data(dst), len(dst))
+}

+ 23 - 0
core/crypto/rand_windows.odin

@@ -0,0 +1,23 @@
+package crypto
+
+import win32 "core:sys/windows"
+import "core:os"
+import "core:fmt"
+
+_rand_bytes :: proc(dst: []byte) {
+	ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
+	if ret != os.ERROR_NONE {
+		switch ret {
+			case os.ERROR_INVALID_HANDLE:
+				// The handle to the first parameter is invalid.
+				// This should not happen here, since we explicitly pass nil to it
+				panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
+			case os.ERROR_INVALID_PARAMETER:
+				// One of the parameters was invalid
+				panic("crypto: BCryptGenRandom Invalid parameter")
+			case:
+				// Unknown error
+				panic(fmt.tprintf("crypto: BCryptGenRandom failed: %d\n", ret))
+		}
+	}
+}
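Both new backends implement the internal _rand_bytes hook; callers are expected to go through the package's public wrapper, which is not part of this diff. A hedged sketch, assuming that wrapper is crypto.rand_bytes:

package example

import "core:crypto"
import "core:fmt"

main :: proc() {
	// Fill a buffer with OS-provided randomness; on Windows this now goes
	// through BCryptGenRandom, on OpenBSD through arc4random_buf.
	key: [32]byte
	crypto.rand_bytes(key[:]) // assumed public wrapper around _rand_bytes
	fmt.println(key)
}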

+ 113 - 28
core/crypto/ripemd/ripemd.odin

@@ -19,16 +19,21 @@ import "../util"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_320 :: 40
+
 // hash_string_128 will hash the given input and return the
 // hash_string_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
     return hash_bytes_128(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_128 will hash the given input and return the
 // hash_bytes_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: Ripemd128_Context
     ctx: Ripemd128_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -36,10 +41,28 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd128_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: Ripemd128_Context
     ctx: Ripemd128_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -57,7 +80,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file_128 will read the file provided by the given handle
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
     } else {
@@ -65,7 +88,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
             return hash_bytes_128(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 }
 
 
 hash_128 :: proc {
 hash_128 :: proc {
@@ -73,18 +96,20 @@ hash_128 :: proc {
     hash_file_128,
     hash_file_128,
     hash_bytes_128,
     hash_bytes_128,
     hash_string_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 }
 
 
 // hash_string_160 will hash the given input and return the
 // hash_string_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
     return hash_bytes_160(transmute([]byte)(data))
     return hash_bytes_160(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_160 will hash the given input and return the
 // hash_bytes_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
     ctx: Ripemd160_Context
     ctx: Ripemd160_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -92,10 +117,28 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd160_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_160 will read the stream in chunks and compute a
 // hash_stream_160 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
     ctx: Ripemd160_Context
     ctx: Ripemd160_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -113,7 +156,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
 
 
 // hash_file_160 will read the file provided by the given handle
 // hash_file_160 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_160(os.stream_from_handle(hd))
         return hash_stream_160(os.stream_from_handle(hd))
     } else {
     } else {
@@ -121,7 +164,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
             return hash_bytes_160(buf[:]), ok
             return hash_bytes_160(buf[:]), ok
         }
         }
     }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
 }
 }
 
 
 hash_160 :: proc {
 hash_160 :: proc {
@@ -129,18 +172,20 @@ hash_160 :: proc {
     hash_file_160,
     hash_file_160,
     hash_bytes_160,
     hash_bytes_160,
     hash_string_160,
     hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Ripemd256_Context
     ctx: Ripemd256_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -148,10 +193,28 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd256_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Ripemd256_Context
     ctx: Ripemd256_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -169,7 +232,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -177,7 +240,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -185,18 +248,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_320 will hash the given input and return the
 // hash_string_320 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_320 :: proc(data: string) -> [40]byte {
+hash_string_320 :: proc(data: string) -> [DIGEST_SIZE_320]byte {
     return hash_bytes_320(transmute([]byte)(data))
     return hash_bytes_320(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_320 will hash the given input and return the
 // hash_bytes_320 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_320 :: proc(data: []byte) -> [40]byte {
-    hash: [40]byte
+hash_bytes_320 :: proc(data: []byte) -> [DIGEST_SIZE_320]byte {
+    hash: [DIGEST_SIZE_320]byte
     ctx: Ripemd320_Context
     ctx: Ripemd320_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -204,10 +269,28 @@ hash_bytes_320 :: proc(data: []byte) -> [40]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_320 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_320 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_320(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_320 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_320 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_320, "Size of destination buffer is smaller than the digest size")
+    ctx: Ripemd320_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_320 will read the stream in chunks and compute a
 // hash_stream_320 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
-    hash: [40]byte
+hash_stream_320 :: proc(s: io.Stream) -> ([DIGEST_SIZE_320]byte, bool) {
+    hash: [DIGEST_SIZE_320]byte
     ctx: Ripemd320_Context
     ctx: Ripemd320_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -225,7 +308,7 @@ hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
 
 
 // hash_file_320 will read the file provided by the given handle
 // hash_file_320 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool) {
+hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_320]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_320(os.stream_from_handle(hd))
         return hash_stream_320(os.stream_from_handle(hd))
     } else {
     } else {
@@ -233,7 +316,7 @@ hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool)
             return hash_bytes_320(buf[:]), ok
             return hash_bytes_320(buf[:]), ok
         }
         }
     }
     }
-    return [40]byte{}, false
+    return [DIGEST_SIZE_320]byte{}, false
 }
 }
 
 
 hash_320 :: proc {
 hash_320 :: proc {
@@ -241,6 +324,8 @@ hash_320 :: proc {
     hash_file_320,
     hash_file_320,
     hash_bytes_320,
     hash_bytes_320,
     hash_string_320,
     hash_string_320,
+    hash_bytes_to_buffer_320,
+    hash_string_to_buffer_320,
 }
 }
 
 
 /*
 /*
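The ripemd file adds the same buffer-writing variants for all four digest widths. A brief sketch for the 160-bit variant, assuming the standard core:crypto/ripemd import path:

package example

import "core:crypto/ripemd"

main :: proc() {
	// RIPEMD-160: DIGEST_SIZE_160 is 20 bytes.
	digest: [ripemd.DIGEST_SIZE_160]byte
	ripemd.hash_string_to_buffer_160("message", digest[:])
}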

+ 30 - 7
core/crypto/sha1/sha1.odin

@@ -19,16 +19,19 @@ import "../util"
 /*
     High level API
 */
+
+DIGEST_SIZE :: 20
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [20]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Sha1_Context
     init(&ctx)
     update(&ctx, data)
@@ -36,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [20]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha1_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Sha1_Context
     init(&ctx)
     buf := make([]byte, 512)
@@ -57,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -65,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -73,6 +94,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*
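The documented requirement is only that the destination be at least DIGEST_SIZE bytes, so a larger buffer works and receives the digest in its first 20 bytes. A sketch, assuming the standard core:crypto/sha1 import path:

package example

import "core:crypto/sha1"

main :: proc() {
	// buf is larger than DIGEST_SIZE (20); the assert only rejects
	// destinations that are too small.
	buf: [32]byte
	sha1.hash_string_to_buffer("data", buf[:])
	// the digest occupies buf[:sha1.DIGEST_SIZE]
}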

+ 117 - 28
core/crypto/sha2/sha2.odin

@@ -21,16 +21,21 @@ import "../util"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // hash_string_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
     return hash_bytes_224(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_224 will hash the given input and return the
 // hash_bytes_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
 	ctx: Sha256_Context
 	ctx: Sha256_Context
     ctx.is224 = true
     ctx.is224 = true
     init(&ctx)
     init(&ctx)
@@ -39,10 +44,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha256_Context
+    ctx.is224 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-	hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+	hash: [DIGEST_SIZE_224]byte
     ctx: Sha512_Context
     ctx: Sha512_Context
     ctx.is384 = false
     ctx.is384 = false
     init(&ctx)
     init(&ctx)
@@ -61,7 +85,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -69,7 +93,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -77,18 +101,20 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
 	ctx: Sha256_Context
 	ctx: Sha256_Context
     ctx.is224 = false
     ctx.is224 = false
     init(&ctx)
     init(&ctx)
@@ -97,10 +123,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha256_Context
+    ctx.is224 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-	hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+	hash: [DIGEST_SIZE_256]byte
     ctx: Sha512_Context
     ctx: Sha512_Context
     ctx.is384 = false
     ctx.is384 = false
     init(&ctx)
     init(&ctx)
@@ -119,7 +164,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -127,7 +172,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -135,18 +180,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
 	ctx: Sha512_Context
 	ctx: Sha512_Context
     ctx.is384 = true
     ctx.is384 = true
     init(&ctx)
     init(&ctx)
@@ -155,10 +202,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha512_Context
+    ctx.is384 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-	hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+	hash: [DIGEST_SIZE_384]byte
     ctx: Sha512_Context
     ctx: Sha512_Context
     ctx.is384 = true
     ctx.is384 = true
     init(&ctx)
     init(&ctx)
@@ -177,7 +243,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -185,7 +251,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -193,18 +259,20 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
 	ctx: Sha512_Context
 	ctx: Sha512_Context
     ctx.is384 = false
     ctx.is384 = false
     init(&ctx)
     init(&ctx)
@@ -213,10 +281,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Sha512_Context
+    ctx.is384 = false
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Sha512_Context
     ctx: Sha512_Context
     ctx.is384 = false
     ctx.is384 = false
 	init(&ctx)
 	init(&ctx)
@@ -235,7 +322,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -243,7 +330,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -251,6 +338,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

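For reference, a minimal usage sketch of the buffer-based 384-bit variants added above, assuming this file is the core:crypto/sha2 package (it defines Sha512_Context) and the usual core:crypto/<pkg> import path; the destination must be at least DIGEST_SIZE_384 (48) bytes or the assert fires:

    package main

    import "core:fmt"
    import "core:crypto/sha2"

    main :: proc() {
        msg := "hello odin"

        // Array-returning form (SHA-512/384).
        sum384 := sha2.hash_string_384(msg)
        fmt.println(sum384)

        // New caller-supplied-buffer form added in this change.
        buf384: [sha2.DIGEST_SIZE_384]byte
        sha2.hash_string_to_buffer_384(msg, buf384[:])
        fmt.println(buf384) // identical digest to sum384
    }
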
+ 125 - 36
core/crypto/sha3/sha3.odin

@@ -20,30 +20,54 @@ import "../_sha3"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
 // hash_string_224 will hash the given input and return the
 // hash_string_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
     return hash_bytes_224(transmute([]byte)(data))
     return hash_bytes_224(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_224 will hash the given input and return the
 // hash_bytes_224 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
-    hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     _sha3.final(&ctx, hash[:])
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_224
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_224 will read the stream in chunks and compute a
 // hash_stream_224 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
-    hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+    hash: [DIGEST_SIZE_224]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 28
+    ctx.mdlen = DIGEST_SIZE_224
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -60,7 +84,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
 
 
 // hash_file_224 will read the file provided by the given handle
 // hash_file_224 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_224(os.stream_from_handle(hd))
         return hash_stream_224(os.stream_from_handle(hd))
     } else {
     } else {
@@ -68,7 +92,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
             return hash_bytes_224(buf[:]), ok
             return hash_bytes_224(buf[:]), ok
         }
         }
     }
     }
-    return [28]byte{}, false
+    return [DIGEST_SIZE_224]byte{}, false
 }
 }
 
 
 hash_224 :: proc {
 hash_224 :: proc {
@@ -76,32 +100,53 @@ hash_224 :: proc {
     hash_file_224,
     hash_file_224,
     hash_bytes_224,
     hash_bytes_224,
     hash_string_224,
     hash_string_224,
+    hash_bytes_to_buffer_224,
+    hash_string_to_buffer_224,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     _sha3.final(&ctx, hash[:])
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -118,7 +163,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -126,7 +171,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -134,32 +179,53 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_384 will hash the given input and return the
 // hash_string_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
     return hash_bytes_384(transmute([]byte)(data))
     return hash_bytes_384(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_384 will hash the given input and return the
 // hash_bytes_384 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
-    hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     _sha3.final(&ctx, hash[:])
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_384
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_384 will read the stream in chunks and compute a
 // hash_stream_384 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
-    hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+    hash: [DIGEST_SIZE_384]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 48
+    ctx.mdlen = DIGEST_SIZE_384
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -176,7 +242,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
 
 
 // hash_file_384 will read the file provided by the given handle
 // hash_file_384 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_384(os.stream_from_handle(hd))
         return hash_stream_384(os.stream_from_handle(hd))
     } else {
     } else {
@@ -184,7 +250,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
             return hash_bytes_384(buf[:]), ok
             return hash_bytes_384(buf[:]), ok
         }
         }
     }
     }
-    return [48]byte{}, false
+    return [DIGEST_SIZE_384]byte{}, false
 }
 }
 
 
 hash_384 :: proc {
 hash_384 :: proc {
@@ -192,32 +258,53 @@ hash_384 :: proc {
     hash_file_384,
     hash_file_384,
     hash_bytes_384,
     hash_bytes_384,
     hash_string_384,
     hash_string_384,
+    hash_bytes_to_buffer_384,
+    hash_string_to_buffer_384,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.final(&ctx, hash[:])
     _sha3.final(&ctx, hash[:])
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_512
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.final(&ctx, hash)
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 64
+    ctx.mdlen = DIGEST_SIZE_512
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -234,7 +321,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -242,7 +329,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -250,6 +337,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

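A similar sketch for the SHA3-256 procedures above (import path assumed to be core:crypto/sha3; the buffer form requires at least DIGEST_SIZE_256 bytes, per its assert):

    package main

    import "core:fmt"
    import "core:crypto/sha3"

    main :: proc() {
        msg := "The quick brown fox jumps over the lazy dog"

        // One-shot form returning a fixed array.
        sum := sha3.hash_string_256(msg)
        fmt.println(sum)

        // Buffer form writes into caller-owned memory instead.
        dst: [sha3.DIGEST_SIZE_256]byte
        sha3.hash_string_to_buffer_256(msg, dst[:])
        fmt.println(dst) // same digest as above
    }
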
+ 66 - 19
core/crypto/shake/shake.odin

@@ -20,18 +20,21 @@ import "../_sha3"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_256 :: 32
+
 // hash_string_128 will hash the given input and return the
 // hash_string_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
     return hash_bytes_128(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_128 will hash the given input and return the
 // hash_bytes_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 16
+    ctx.mdlen = DIGEST_SIZE_128
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.shake_xof(&ctx)
     _sha3.shake_xof(&ctx)
@@ -39,12 +42,32 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_128
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.shake_xof(&ctx)
+    _sha3.shake_out(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 16
+    ctx.mdlen = DIGEST_SIZE_128
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -62,7 +85,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file_128 will read the file provided by the given handle
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
     } else {
@@ -70,7 +93,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
             return hash_bytes_128(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 }
 
 
 hash_128 :: proc {
 hash_128 :: proc {
@@ -78,20 +101,22 @@ hash_128 :: proc {
     hash_file_128,
     hash_file_128,
     hash_bytes_128,
     hash_bytes_128,
     hash_string_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 }
 
 
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     _sha3.update(&ctx, data)
     _sha3.update(&ctx, data)
     _sha3.shake_xof(&ctx)
     _sha3.shake_xof(&ctx)
@@ -99,12 +124,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: _sha3.Sha3_Context
+    ctx.mdlen = DIGEST_SIZE_256
+    _sha3.init(&ctx)
+    _sha3.update(&ctx, data)
+    _sha3.shake_xof(&ctx)
+    _sha3.shake_out(&ctx, hash)
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: _sha3.Sha3_Context
     ctx: _sha3.Sha3_Context
-    ctx.mdlen = 32
+    ctx.mdlen = DIGEST_SIZE_256
     _sha3.init(&ctx)
     _sha3.init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
     defer delete(buf)
     defer delete(buf)
@@ -122,7 +167,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -130,7 +175,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -138,13 +183,15 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 /*
 /*
     Low level API
     Low level API
 */
 */
 
 
-Sha3_Context :: _sha3.Sha3_Context
+Shake_Context :: _sha3.Sha3_Context
 
 
 init :: proc(ctx: ^_sha3.Sha3_Context) {
 init :: proc(ctx: ^_sha3.Sha3_Context) {
     _sha3.init(ctx)
     _sha3.init(ctx)

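A usage sketch for the SHAKE additions, which also rename the low-level alias Sha3_Context to Shake_Context (import path assumed to be core:crypto/shake; these helpers emit a fixed DIGEST_SIZE_128/DIGEST_SIZE_256 output):

    package main

    import "core:fmt"
    import "core:crypto/shake"

    main :: proc() {
        msg := "some input"

        // SHAKE128 with the default 16-byte output.
        sum := shake.hash_string_128(msg)
        fmt.println(sum)

        // Buffer variant; dst must be at least DIGEST_SIZE_128 bytes.
        dst: [shake.DIGEST_SIZE_128]byte
        shake.hash_string_to_buffer_128(msg, dst[:])
        fmt.println(dst)
    }
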
+ 335 - 0
core/crypto/siphash/siphash.odin

@@ -0,0 +1,335 @@
+package siphash
+
+/*
+    Copyright 2022 zhibog
+    Made available under the BSD-3 license.
+
+    List of contributors:
+        zhibog:  Initial implementation.
+
+    Implementation of the SipHash hashing algorithm, as defined at <https://github.com/veorq/SipHash> and <https://www.aumasson.jp/siphash/siphash.pdf>
+
+    Use the specific procedures for a certain setup. The generic procedures default to SipHash-2-4.
+*/
+
+import "core:crypto"
+import "core:crypto/util"
+
+/*
+    High level API
+*/
+
+KEY_SIZE    :: 16
+DIGEST_SIZE :: 8
+
+// sum_string_1_3 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_1_3 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_1_3(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_1_3 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_1_3 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 1, 3)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_1_3 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_1_3 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_1_3(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_1_3 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_1_3 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_1_3(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_1_3 :: proc {
+    sum_string_1_3,
+    sum_bytes_1_3,
+    sum_string_to_buffer_1_3,
+    sum_bytes_to_buffer_1_3,
+}
+
+// verify_u64_1_3 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_1_3 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_1_3(msg, key) == tag
+}
+
+// verify_bytes_1_3 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_1_3 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_1_3(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_1_3 :: proc {
+    verify_bytes_1_3,
+    verify_u64_1_3,
+}
+
+// sum_string_2_4 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_2_4 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_2_4(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_2_4 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_2_4 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 2, 4)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_2_4 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_2_4 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_2_4(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_2_4 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_2_4 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_2_4(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_2_4 :: proc {
+    sum_string_2_4,
+    sum_bytes_2_4,
+    sum_string_to_buffer_2_4,
+    sum_bytes_to_buffer_2_4,
+}
+
+sum_string           :: sum_string_2_4
+sum_bytes            :: sum_bytes_2_4
+sum_string_to_buffer :: sum_string_to_buffer_2_4
+sum_bytes_to_buffer  :: sum_bytes_to_buffer_2_4
+sum :: proc {
+    sum_string,
+    sum_bytes,
+    sum_string_to_buffer,
+    sum_bytes_to_buffer,
+}
+
+// verify_u64_2_4 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_2_4 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_2_4(msg, key) == tag
+}
+
+// verify_bytes_2_4 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_2_4 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_2_4(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_2_4 :: proc {
+    verify_bytes_2_4,
+    verify_u64_2_4,
+}
+
+verify_bytes :: verify_bytes_2_4
+verify_u64   :: verify_u64_2_4
+verify :: proc {
+    verify_bytes,
+    verify_u64,
+}
+
+// sum_string_4_8 will hash the given message with the key and return
+// the computed hash as a u64
+sum_string_4_8 :: proc(msg, key: string) -> u64 {
+    return sum_bytes_4_8(transmute([]byte)(msg), transmute([]byte)(key))
+}
+
+// sum_bytes_4_8 will hash the given message with the key and return
+// the computed hash as a u64
+sum_bytes_4_8 :: proc (msg, key: []byte) -> u64 {
+    ctx: Context
+    hash: u64
+    init(&ctx, key, 4, 8)
+    update(&ctx, msg)
+    final(&ctx, &hash)
+    return hash
+}
+
+// sum_string_to_buffer_4_8 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_string_to_buffer_4_8 :: proc(msg, key: string, dst: []byte) {
+    sum_bytes_to_buffer_4_8(transmute([]byte)(msg), transmute([]byte)(key), dst)
+}
+
+// sum_bytes_to_buffer_4_8 will hash the given message with the key and write
+// the computed hash into the provided destination buffer
+sum_bytes_to_buffer_4_8 :: proc(msg, key, dst: []byte) {
+    assert(len(dst) >= DIGEST_SIZE, "crypto/siphash: Destination buffer needs to be at least of size 8")
+    hash  := sum_bytes_4_8(msg, key)
+    _collect_output(dst[:], hash)
+}
+
+sum_4_8 :: proc {
+    sum_string_4_8,
+    sum_bytes_4_8,
+    sum_string_to_buffer_4_8,
+    sum_bytes_to_buffer_4_8,
+}
+
+// verify_u64_4_8 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_u64_4_8 :: proc (tag: u64, msg, key: []byte) -> bool {
+    return sum_bytes_4_8(msg, key) == tag
+}
+
+// verify_bytes_4_8 will check if the supplied tag matches the output
+// computed from the provided message and key
+verify_bytes_4_8 :: proc (tag, msg, key: []byte) -> bool {
+    derived_tag: [8]byte
+    sum_bytes_to_buffer_4_8(msg, key, derived_tag[:])
+    return crypto.compare_constant_time(derived_tag[:], tag) == 1
+}
+
+verify_4_8 :: proc {
+    verify_bytes_4_8,
+    verify_u64_4_8,
+}
+
+/*
+    Low level API
+*/
+
+init :: proc(ctx: ^Context, key: []byte, c_rounds, d_rounds: int) {
+    assert(len(key) == KEY_SIZE, "crypto/siphash: Invalid key size, want 16")
+    ctx.c_rounds = c_rounds
+    ctx.d_rounds = d_rounds
+    is_valid_setting := (ctx.c_rounds == 1 && ctx.d_rounds == 3) ||
+                        (ctx.c_rounds == 2 && ctx.d_rounds == 4) ||
+                        (ctx.c_rounds == 4 && ctx.d_rounds == 8)
+    assert(is_valid_setting, "crypto/siphash: Incorrect rounds set up. Valid pairs are (1,3), (2,4) and (4,8)")
+    ctx.k0 = util.U64_LE(key[:8])
+    ctx.k1 = util.U64_LE(key[8:])
+    ctx.v0 = 0x736f6d6570736575 ~ ctx.k0
+    ctx.v1 = 0x646f72616e646f6d ~ ctx.k1
+    ctx.v2 = 0x6c7967656e657261 ~ ctx.k0
+    ctx.v3 = 0x7465646279746573 ~ ctx.k1
+    ctx.is_initialized = true
+}
+
+update :: proc(ctx: ^Context, data: []byte) {
+    assert(ctx.is_initialized, "crypto/siphash: Context is not initialized")
+    ctx.last_block = len(data) / 8 * 8
+    ctx.buf = data
+    i := 0
+    m: u64
+    for i < ctx.last_block {
+        m = u64(ctx.buf[i] & 0xff)
+        i += 1
+
+        for r in u64(1)..<8 {
+            m |= u64(ctx.buf[i] & 0xff) << (r * 8)
+            i += 1
+        }
+
+        ctx.v3 ~= m
+        for _ in 0..<ctx.c_rounds {
+            _compress(ctx)
+        }
+
+        ctx.v0 ~= m
+    }
+}
+
+final :: proc(ctx: ^Context, dst: ^u64) {
+    m: u64
+    for i := len(ctx.buf) - 1; i >= ctx.last_block; i -= 1 {
+        m <<= 8
+        m |= u64(ctx.buf[i] & 0xff)
+    }
+    m |= u64(len(ctx.buf) << 56)
+
+    ctx.v3 ~= m
+
+    for _ in 0..<ctx.c_rounds {
+        _compress(ctx)
+    }
+
+    ctx.v0 ~= m
+    ctx.v2 ~= 0xff
+
+    for _ in 0..<ctx.d_rounds {
+        _compress(ctx)
+    }
+
+    dst^ = ctx.v0 ~ ctx.v1 ~ ctx.v2 ~ ctx.v3
+
+    reset(ctx)
+}
+
+reset :: proc(ctx: ^Context) {
+    ctx.k0, ctx.k1 = 0, 0
+    ctx.v0, ctx.v1 = 0, 0
+    ctx.v2, ctx.v3 = 0, 0
+    ctx.last_block = 0
+    ctx.c_rounds = 0
+    ctx.d_rounds = 0
+    ctx.is_initialized = false
+}
+
+Context :: struct {
+    v0, v1, v2, v3: u64,    // State values
+    k0, k1:         u64,    // Split key
+    c_rounds:       int,    // Number of message rounds
+    d_rounds:       int,    // Number of finalization rounds
+    buf:            []byte, // Provided data
+    last_block:     int,    // Offset from the last block
+    is_initialized: bool,
+}
+
+_get_byte :: #force_inline proc "contextless" (byte_num: byte, into: u64) -> byte {
+    return byte(into >> (((~byte_num) & (size_of(u64) - 1)) << 3))
+}
+
+_collect_output :: #force_inline proc "contextless" (dst: []byte, hash: u64) {
+    dst[0] = _get_byte(7, hash)
+    dst[1] = _get_byte(6, hash)
+    dst[2] = _get_byte(5, hash)
+    dst[3] = _get_byte(4, hash)
+    dst[4] = _get_byte(3, hash)
+    dst[5] = _get_byte(2, hash)
+    dst[6] = _get_byte(1, hash)
+    dst[7] = _get_byte(0, hash)
+}
+
+_compress :: #force_inline proc "contextless" (ctx: ^Context) {
+    ctx.v0 += ctx.v1
+    ctx.v1  = util.ROTL64(ctx.v1, 13)
+    ctx.v1 ~= ctx.v0
+    ctx.v0  = util.ROTL64(ctx.v0, 32)
+    ctx.v2 += ctx.v3
+    ctx.v3  = util.ROTL64(ctx.v3, 16)
+    ctx.v3 ~= ctx.v2
+    ctx.v0 += ctx.v3
+    ctx.v3  = util.ROTL64(ctx.v3, 21)
+    ctx.v3 ~= ctx.v0
+    ctx.v2 += ctx.v1
+    ctx.v1  = util.ROTL64(ctx.v1, 17)
+    ctx.v1 ~= ctx.v2
+    ctx.v2  = util.ROTL64(ctx.v2, 32)
+}

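A sketch of both APIs added by this file (import path assumed to be core:crypto/siphash; the all-zero key is for illustration only, a real key should be 16 random bytes):

    package main

    import "core:fmt"
    import "core:crypto/siphash"

    main :: proc() {
        key: [siphash.KEY_SIZE]byte // demo key; use 16 random bytes in practice
        msg_str := "a short message"
        msg := transmute([]byte)msg_str

        // One-shot SipHash-2-4 via the default aliases.
        tag := siphash.sum_bytes(msg, key[:])
        fmt.println(siphash.verify_u64(tag, msg, key[:])) // true

        // Streaming form with explicit compression/finalization round counts.
        ctx: siphash.Context
        out: u64
        siphash.init(&ctx, key[:], 2, 4)
        siphash.update(&ctx, msg)
        siphash.final(&ctx, &out)
        fmt.println(out == tag) // true
    }
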
+ 33 - 10
core/crypto/sm3/sm3.odin

@@ -15,16 +15,22 @@ import "core:io"
 
 
 import "../util"
 import "../util"
 
 
+/*
+    High level API
+*/
+
+DIGEST_SIZE :: 32
+
 // hash_string will hash the given input and return the
 // hash_string will hash the given input and return the
 // computed hash
 // computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
     return hash_bytes(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes will hash the given input and return the
 // hash_bytes will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+    hash: [DIGEST_SIZE]byte
     ctx: Sm3_Context
     ctx: Sm3_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -32,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Sm3_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+    hash: [DIGEST_SIZE]byte
     ctx: Sm3_Context
     ctx: Sm3_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -53,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file will read the file provided by the given handle
 // hash_file will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
         return hash_stream(os.stream_from_handle(hd))
     } else {
     } else {
@@ -61,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
             return hash_bytes(buf[:]), ok
             return hash_bytes(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }
 }
 
 
 hash :: proc {
 hash :: proc {
@@ -69,6 +93,8 @@ hash :: proc {
     hash_file,
     hash_file,
     hash_bytes,
     hash_bytes,
     hash_string,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }
 }
 
 
 /*
 /*
@@ -146,9 +172,6 @@ Sm3_Context :: struct {
     length:    u64,
     length:    u64,
 }
 }
 
 
-BLOCK_SIZE_IN_BYTES :: 64
-BLOCK_SIZE_IN_32    :: 16
-
 IV := [8]u32 {
 IV := [8]u32 {
     0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
     0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
     0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,
     0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,

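A short sketch of the new SM3 buffer procedures (import path assumed to be core:crypto/sm3; the destination needs at least DIGEST_SIZE, i.e. 32, bytes):

    package main

    import "core:fmt"
    import "core:crypto/sm3"

    main :: proc() {
        // Array-returning form.
        sum := sm3.hash_string("abc")
        fmt.println(sum)

        // Buffer form added in this change.
        dst: [sm3.DIGEST_SIZE]byte
        sm3.hash_string_to_buffer("abc", dst[:])
        fmt.println(dst) // identical digest
    }
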
+ 58 - 14
core/crypto/streebog/streebog.odin

@@ -19,16 +19,19 @@ import "../util"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_512 :: 64
+
 // hash_string_256 will hash the given input and return the
 // hash_string_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
     return hash_bytes_256(transmute([]byte)(data))
     return hash_bytes_256(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_256 will hash the given input and return the
 // hash_bytes_256 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
-    hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Streebog_Context
     ctx: Streebog_Context
     ctx.is256 = true
     ctx.is256 = true
     init(&ctx)
     init(&ctx)
@@ -37,10 +40,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+    ctx: Streebog_Context
+    ctx.is256 = true
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash[:])
+}
+
 // hash_stream_256 will read the stream in chunks and compute a
 // hash_stream_256 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
-    hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+    hash: [DIGEST_SIZE_256]byte
     ctx: Streebog_Context
     ctx: Streebog_Context
     ctx.is256 = true
     ctx.is256 = true
     init(&ctx)
     init(&ctx)
@@ -59,7 +81,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
 
 
 // hash_file_256 will read the file provided by the given handle
 // hash_file_256 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_256(os.stream_from_handle(hd))
         return hash_stream_256(os.stream_from_handle(hd))
     } else {
     } else {
@@ -67,7 +89,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
             return hash_bytes_256(buf[:]), ok
             return hash_bytes_256(buf[:]), ok
         }
         }
     }
     }
-    return [32]byte{}, false
+    return [DIGEST_SIZE_256]byte{}, false
 }
 }
 
 
 hash_256 :: proc {
 hash_256 :: proc {
@@ -75,18 +97,20 @@ hash_256 :: proc {
     hash_file_256,
     hash_file_256,
     hash_bytes_256,
     hash_bytes_256,
     hash_string_256,
     hash_string_256,
+    hash_bytes_to_buffer_256,
+    hash_string_to_buffer_256,
 }
 }
 
 
 // hash_string_512 will hash the given input and return the
 // hash_string_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
     return hash_bytes_512(transmute([]byte)(data))
     return hash_bytes_512(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_512 will hash the given input and return the
 // hash_bytes_512 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
-    hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Streebog_Context
     ctx: Streebog_Context
     init(&ctx)
     init(&ctx)
     update(&ctx, data)
     update(&ctx, data)
@@ -94,10 +118,28 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+    ctx: Streebog_Context
+    init(&ctx)
+    update(&ctx, data)
+    final(&ctx, hash[:])
+}
+
 // hash_stream_512 will read the stream in chunks and compute a
 // hash_stream_512 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
-    hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+    hash: [DIGEST_SIZE_512]byte
     ctx: Streebog_Context
     ctx: Streebog_Context
     init(&ctx)
     init(&ctx)
     buf := make([]byte, 512)
     buf := make([]byte, 512)
@@ -115,7 +157,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
 
 
 // hash_file_512 will read the file provided by the given handle
 // hash_file_512 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_512(os.stream_from_handle(hd))
         return hash_stream_512(os.stream_from_handle(hd))
     } else {
     } else {
@@ -123,7 +165,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
             return hash_bytes_512(buf[:]), ok
             return hash_bytes_512(buf[:]), ok
         }
         }
     }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE_512]byte{}, false
 }
 }
 
 
 hash_512 :: proc {
 hash_512 :: proc {
@@ -131,6 +173,8 @@ hash_512 :: proc {
     hash_file_512,
     hash_file_512,
     hash_bytes_512,
     hash_bytes_512,
     hash_string_512,
     hash_string_512,
+    hash_bytes_to_buffer_512,
+    hash_string_to_buffer_512,
 }
 }
 
 
 /*
 /*

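Streebog (GOST R 34.11-2012) gains the same pair of buffer procedures for both digest sizes; a brief sketch, import path assumed to be core:crypto/streebog:

    package main

    import "core:fmt"
    import "core:crypto/streebog"

    main :: proc() {
        msg := "message"

        // 256-bit output.
        dst256: [streebog.DIGEST_SIZE_256]byte
        streebog.hash_string_to_buffer_256(msg, dst256[:])
        fmt.println(dst256)

        // 512-bit output.
        dst512: [streebog.DIGEST_SIZE_512]byte
        streebog.hash_string_to_buffer_512(msg, dst512[:])
        fmt.println(dst512)
    }
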
+ 88 - 21
core/crypto/tiger/tiger.odin

@@ -19,16 +19,20 @@ import "../_tiger"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
 // hash_string_128 will hash the given input and return the
 // hash_string_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
     return hash_bytes_128(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_128 will hash the given input and return the
 // hash_bytes_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     ctx.ver = 1
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     ctx.ver = 1
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file_128 will read the file provided by the given handle
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
     } else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
             return hash_bytes_128(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 }
 
 
 hash_128 :: proc {
 hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
     hash_file_128,
     hash_file_128,
     hash_bytes_128,
     hash_bytes_128,
     hash_string_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 }
 
 
 // hash_string_160 will hash the given input and return the
 // hash_string_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
     return hash_bytes_160(transmute([]byte)(data))
     return hash_bytes_160(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_160 will hash the given input and return the
 // hash_bytes_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     ctx.ver = 1
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_160 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     _tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {

 // hash_file_160 will read the file provided by the given handle
 // and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
     if !load_at_once {
         return hash_stream_160(os.stream_from_handle(hd))
     } else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
             return hash_bytes_160(buf[:]), ok
         }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
 }

 hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
     hash_file_160,
     hash_bytes_160,
     hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
 }

 // hash_string_192 will hash the given input and return the
 // computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
     return hash_bytes_192(transmute([]byte)(data))
 }

 // hash_bytes_192 will hash the given input and return the
 // computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
-    hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+    hash: [DIGEST_SIZE_192]byte
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     _tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
     return hash
 }

+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 1
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_192 will read the stream in chunks and compute a
 // hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
-    hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+    hash: [DIGEST_SIZE_192]byte
     ctx: _tiger.Tiger_Context
     ctx.ver = 1
     _tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {

 // hash_file_192 will read the file provided by the given handle
 // and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
     if !load_at_once {
         return hash_stream_192(os.stream_from_handle(hd))
     } else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
             return hash_bytes_192(buf[:]), ok
         }
     }
-    return [24]byte{}, false
+    return [DIGEST_SIZE_192]byte{}, false
 }

 hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
     hash_file_192,
     hash_bytes_192,
     hash_string_192,
+    hash_bytes_to_buffer_192,
+    hash_string_to_buffer_192,
 }

 /*

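As a quick orientation for the new *_to_buffer procedures and DIGEST_SIZE_* constants introduced above, here is a minimal usage sketch. The core:crypto/tiger import path is an assumption based on the file path; the procedure names and sizes come from the hunks above.

```odin
package main

import "core:crypto/tiger" // assumed import path for the package patched above
import "core:fmt"

main :: proc() {
	// The destination must be at least DIGEST_SIZE_192 (24) bytes,
	// otherwise the assert inside hash_bytes_to_buffer_192 fires.
	digest: [tiger.DIGEST_SIZE_192]byte
	tiger.hash_string_to_buffer_192("hello", digest[:])
	fmt.printf("Tiger/192: %x\n", digest)
}
```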
+ 88 - 21
core/crypto/tiger2/tiger2.odin

@@ -19,16 +19,20 @@ import "../_tiger"
     High level API
     High level API
 */
 */
 
 
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
 // hash_string_128 will hash the given input and return the
 // hash_string_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
     return hash_bytes_128(transmute([]byte)(data))
     return hash_bytes_128(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_128 will hash the given input and return the
 // hash_bytes_128 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
-    hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_128 will read the stream in chunks and compute a
 // hash_stream_128 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
-    hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+    hash: [DIGEST_SIZE_128]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
 
 
 // hash_file_128 will read the file provided by the given handle
 // hash_file_128 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_128(os.stream_from_handle(hd))
         return hash_stream_128(os.stream_from_handle(hd))
     } else {
     } else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
             return hash_bytes_128(buf[:]), ok
             return hash_bytes_128(buf[:]), ok
         }
         }
     }
     }
-    return [16]byte{}, false
+    return [DIGEST_SIZE_128]byte{}, false
 }
 }
 
 
 hash_128 :: proc {
 hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
     hash_file_128,
     hash_file_128,
     hash_bytes_128,
     hash_bytes_128,
     hash_string_128,
     hash_string_128,
+    hash_bytes_to_buffer_128,
+    hash_string_to_buffer_128,
 }
 }
 
 
 // hash_string_160 will hash the given input and return the
 // hash_string_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
     return hash_bytes_160(transmute([]byte)(data))
     return hash_bytes_160(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_160 will hash the given input and return the
 // hash_bytes_160 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
-    hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+    hash: [DIGEST_SIZE_160]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_160 will read the stream in chunks and compute a
 // hash_stream_160 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
-    hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+    hash: [DIGEST_SIZE_160]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
 
 
 // hash_file_160 will read the file provided by the given handle
 // hash_file_160 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_160(os.stream_from_handle(hd))
         return hash_stream_160(os.stream_from_handle(hd))
     } else {
     } else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
             return hash_bytes_160(buf[:]), ok
             return hash_bytes_160(buf[:]), ok
         }
         }
     }
     }
-    return [20]byte{}, false
+    return [DIGEST_SIZE_160]byte{}, false
 }
 }
 
 
 hash_160 :: proc {
 hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
     hash_file_160,
     hash_file_160,
     hash_bytes_160,
     hash_bytes_160,
     hash_string_160,
     hash_string_160,
+    hash_bytes_to_buffer_160,
+    hash_string_to_buffer_160,
 }
 }
 
 
 // hash_string_192 will hash the given input and return the
 // hash_string_192 will hash the given input and return the
 // computed hash
 // computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
     return hash_bytes_192(transmute([]byte)(data))
     return hash_bytes_192(transmute([]byte)(data))
 }
 }
 
 
 // hash_bytes_192 will hash the given input and return the
 // hash_bytes_192 will hash the given input and return the
 // computed hash
 // computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
-    hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+    hash: [DIGEST_SIZE_192]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
     return hash
     return hash
 }
 }
 
 
+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+    ctx: _tiger.Tiger_Context
+    ctx.ver = 2
+    _tiger.init(&ctx)
+    _tiger.update(&ctx, data)
+    _tiger.final(&ctx, hash)
+}
+
 // hash_stream_192 will read the stream in chunks and compute a
 // hash_stream_192 will read the stream in chunks and compute a
 // hash from its contents
 // hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
-    hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+    hash: [DIGEST_SIZE_192]byte
     ctx: _tiger.Tiger_Context
     ctx: _tiger.Tiger_Context
     ctx.ver = 2
     ctx.ver = 2
     _tiger.init(&ctx)
     _tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
 
 
 // hash_file_192 will read the file provided by the given handle
 // hash_file_192 will read the file provided by the given handle
 // and compute a hash
 // and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
     if !load_at_once {
     if !load_at_once {
         return hash_stream_192(os.stream_from_handle(hd))
         return hash_stream_192(os.stream_from_handle(hd))
     } else {
     } else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
             return hash_bytes_192(buf[:]), ok
             return hash_bytes_192(buf[:]), ok
         }
         }
     }
     }
-    return [24]byte{}, false
+    return [DIGEST_SIZE_192]byte{}, false
 }
 }
 
 
 hash_192 :: proc {
 hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
     hash_file_192,
     hash_file_192,
     hash_bytes_192,
     hash_bytes_192,
     hash_string_192,
     hash_string_192,
+    hash_bytes_to_buffer_192,
+    hash_string_to_buffer_192,
 }
 }
 
 
 /*
 /*

+ 29 - 7
core/crypto/whirlpool/whirlpool.odin

@@ -19,16 +19,18 @@ import "../util"
     High level API
 */

+DIGEST_SIZE :: 64
+
 // hash_string will hash the given input and return the
 // computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
     return hash_bytes(transmute([]byte)(data))
 }

 // hash_bytes will hash the given input and return the
 // computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
-	hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+	hash: [DIGEST_SIZE]byte
 	ctx: Whirlpool_Context
     // init(&ctx) No-op
     update(&ctx, data)
@@ -36,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
     return hash
 }

+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+    hash_bytes_to_buffer(transmute([]byte)(data), hash)
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+    assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+    ctx: Whirlpool_Context
+    // init(&ctx) No-op
+    update(&ctx, data)
+    final(&ctx, hash)
+}
+
 // hash_stream will read the stream in chunks and compute a
 // hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
-	hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+	hash: [DIGEST_SIZE]byte
 	ctx: Whirlpool_Context
 	// init(&ctx) No-op
 	buf := make([]byte, 512)
@@ -57,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {

 // hash_file will read the file provided by the given handle
 // and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
 	if !load_at_once {
         return hash_stream(os.stream_from_handle(hd))
     } else {
@@ -65,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
             return hash_bytes(buf[:]), ok
         }
     }
-    return [64]byte{}, false
+    return [DIGEST_SIZE]byte{}, false
 }

 hash :: proc {
@@ -73,6 +93,8 @@ hash :: proc {
     hash_file,
     hash_bytes,
     hash_string,
+    hash_bytes_to_buffer,
+    hash_string_to_buffer,
 }

 /*

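The same to-buffer pattern applies here. A minimal sketch, assuming the package is imported as core:crypto/whirlpool to match the file path above:

```odin
package main

import "core:crypto/whirlpool" // assumed import path, matching the file path above
import "core:fmt"

main :: proc() {
	digest: [whirlpool.DIGEST_SIZE]byte // DIGEST_SIZE is 64, per the constant added above
	whirlpool.hash_string_to_buffer("hello", digest[:])
	fmt.printf("WHIRLPOOL: %x\n", digest)
}
```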
+ 1 - 1
core/dynlib/lib_unix.odin

@@ -1,4 +1,4 @@
-// +build linux, darwin, freebsd
+// +build linux, darwin, freebsd, openbsd
 package dynlib

 import "core:os"

+ 6 - 6
core/encoding/hxa/doc.odin

@@ -27,7 +27,7 @@
 // 	Construction history, or BSP trees would make the format too large to serve its purpose.
 // 	The facilities of the formats to store meta data should make the format flexible enough
 // 	for most uses. Adding HxA support should be something anyone can do in a day's work.
-
+//
 // Structure:
 // ----------
 // HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
@@ -45,17 +45,17 @@
 // of a number of named layers. All layers in the stack have the same number of elements. Each layer
 // describes one property of the primitive. Each layer can have multiple channels and each layer can
 // store data of a different type.
-
+//
 // HxA stores 3 kinds of nodes
 // 	- Pixel data.
 // 	- Polygon geometry data.
 // 	- Meta data only.
-
+//
 // Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
 // Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
 // layers to store things like color. The length of the layer stack is determined by the type and
 // dimensions stored in the
-
+//
 // Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
 // vertex data stores things like vertices, blend shapes, weight maps, and vertex colors. The first
 // layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
@@ -63,7 +63,7 @@
 // for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
 // integer layer named "index" describing the vertices used to form polygons. The last value in each
 // polygon has a negative - 1 index to indicate the end of the polygon.
-
+//
 // Example:
 // 	A quad and a tri with the vertex index:
 // 		[0, 1, 2, 3] [1, 4, 2]
@@ -72,7 +72,7 @@
 // The face stack stores values per face. The length of the face stack has to match the number of
 // negative values in the index layer in the corner stack. The face stack can be used to store things
 // like material index.
-
+//
 // Storage
 // -------
 // All data is stored in little endian byte order with no padding. The layout mirrors the structs

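To make the corner-index convention above concrete: the last corner of each polygon stores -(vertex_index + 1), which both encodes the final vertex and marks the end of the polygon. A small sketch derived from that stated rule (not copied from the file):

```odin
package main

import "core:fmt"

main :: proc() {
	// A quad [0, 1, 2, 3] and a tri [1, 4, 2]: the last corner of each polygon
	// stores -(vertex_index + 1), so 3 becomes -4 and 2 becomes -3.
	index_layer := []i32{0, 1, 2, -4, 1, 4, -3}
	fmt.println(index_layer)
}
```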
+ 25 - 4
core/encoding/hxa/read.odin

@@ -39,6 +39,9 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 	read_value :: proc(r: ^Reader, $T: typeid) -> (value: T, err: Read_Error) {
 	read_value :: proc(r: ^Reader, $T: typeid) -> (value: T, err: Read_Error) {
 		remaining := len(r.data) - r.offset
 		remaining := len(r.data) - r.offset
 		if remaining < size_of(T) {
 		if remaining < size_of(T) {
+			if r.print_error {
+				fmt.eprintf("file '%s' failed to read value at offset %v\n", r.filename, r.offset)
+			}
 			err = .Short_Read
 			err = .Short_Read
 			return
 			return
 		}
 		}
@@ -51,6 +54,10 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 	read_array :: proc(r: ^Reader, $T: typeid, count: int) -> (value: []T, err: Read_Error) {
 	read_array :: proc(r: ^Reader, $T: typeid, count: int) -> (value: []T, err: Read_Error) {
 		remaining := len(r.data) - r.offset
 		remaining := len(r.data) - r.offset
 		if remaining < size_of(T)*count {
 		if remaining < size_of(T)*count {
+			if r.print_error {
+				fmt.eprintf("file '%s' failed to read array of %d elements at offset %v\n",
+							r.filename, count, r.offset)
+			}
 			err = .Short_Read
 			err = .Short_Read
 			return
 			return
 		}
 		}
@@ -82,7 +89,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 			type := read_value(r, Meta_Value_Type) or_return
 			type := read_value(r, Meta_Value_Type) or_return
 			if type > max(Meta_Value_Type) {
 			if type > max(Meta_Value_Type) {
 				if r.print_error {
 				if r.print_error {
-					fmt.eprintf("HxA Error: file '%s' has meta value type %d. Maximum value is ", r.filename, u8(type), u8(max(Meta_Value_Type)))
+					fmt.eprintf("HxA Error: file '%s' has meta value type %d. Maximum value is %d\n",
+								r.filename, u8(type), u8(max(Meta_Value_Type)))
 				}
 				}
 				err = .Invalid_Data
 				err = .Invalid_Data
 				return
 				return
@@ -114,7 +122,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 			type := read_value(r, Layer_Data_Type) or_return
 			type := read_value(r, Layer_Data_Type) or_return
 			if type > max(type) {
 			if type > max(type) {
 				if r.print_error {
 				if r.print_error {
-					fmt.eprintf("HxA Error: file '%s' has layer data type %d. Maximum value is ", r.filename, u8(type), u8(max(Layer_Data_Type)))
+					fmt.eprintf("HxA Error: file '%s' has layer data type %d. Maximum value is %d\n",
+								r.filename, u8(type), u8(max(Layer_Data_Type)))
 				}
 				}
 				err = .Invalid_Data
 				err = .Invalid_Data
 				return
 				return
@@ -134,13 +143,23 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 	}
 	}
 
 
 	if len(data) < size_of(Header) {
 	if len(data) < size_of(Header) {
+		if print_error {
+			fmt.eprintf("HxA Error: file '%s' has no header\n", filename)
+		}
+		err = .Short_Read
 		return
 		return
 	}
 	}
 
 
 	context.allocator = allocator
 	context.allocator = allocator
 
 
 	header := cast(^Header)raw_data(data)
 	header := cast(^Header)raw_data(data)
-	assert(header.magic_number == MAGIC_NUMBER)
+	if (header.magic_number != MAGIC_NUMBER) {
+		if print_error {
+			fmt.eprintf("HxA Error: file '%s' has invalid magic number 0x%x\n", filename, header.magic_number)
+		}
+		err = .Invalid_Data
+		return
+	}
 
 
 	r := &Reader{
 	r := &Reader{
 		filename    = filename,
 		filename    = filename,
@@ -150,6 +169,7 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 	}
 	}
 
 
 	node_count := 0
 	node_count := 0
+	file.header = header^
 	file.nodes = make([]Node, header.internal_node_count)
 	file.nodes = make([]Node, header.internal_node_count)
 	defer if err != nil {
 	defer if err != nil {
 		nodes_destroy(file.nodes)
 		nodes_destroy(file.nodes)
@@ -162,7 +182,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
 		type := read_value(r, Node_Type) or_return
 		type := read_value(r, Node_Type) or_return
 		if type > max(Node_Type) {
 		if type > max(Node_Type) {
 			if r.print_error {
 			if r.print_error {
-				fmt.eprintf("HxA Error: file '%s' has node type %d. Maximum value is ", r.filename, u8(type), u8(max(Node_Type)))
+				fmt.eprintf("HxA Error: file '%s' has node type %d. Maximum value is %d\n",
+							r.filename, u8(type), u8(max(Node_Type)))
 			}
 			}
 			err = .Invalid_Data
 			err = .Invalid_Data
 			return
 			return

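For orientation on the stricter validation added above (header length check, magic number check, extra diagnostics), a minimal read sketch. The import path and the exact return signature are assumptions inferred from the hunks shown; only read, its parameters, and file.nodes appear in the diff itself.

```odin
package main

import "core:encoding/hxa" // assumed import path
import "core:fmt"
import "core:os"

main :: proc() {
	data, ok := os.read_entire_file("model.hxa") // hypothetical input file
	if !ok { return }
	defer delete(data)

	// With print_error = true the new diagnostics above are printed on failure.
	file, err := hxa.read(data, "model.hxa", true)
	if err != nil {
		return
	}
	fmt.println("nodes:", len(file.nodes))
}
```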
+ 3 - 3
core/encoding/hxa/write.odin

@@ -84,7 +84,7 @@ write_internal :: proc(w: ^Writer, file: File) {

 	write_metadata :: proc(w: ^Writer, meta_data: []Meta) {
 		for m in meta_data {
-			name_len := max(len(m.name), 255)
+			name_len := min(len(m.name), 255)
 			write_value(w, u8(name_len))
 			write_string(w, m.name[:name_len])

@@ -127,7 +127,7 @@ write_internal :: proc(w: ^Writer, file: File) {
 	write_layer_stack :: proc(w: ^Writer, layers: Layer_Stack) {
 		write_value(w, u32(len(layers)))
 		for layer in layers {
-			name_len := max(len(layer.name), 255)
+			name_len := min(len(layer.name), 255)
 			write_value(w, u8(name_len))
 			write_string(w, layer.name[:name_len])

@@ -152,7 +152,7 @@ write_internal :: proc(w: ^Writer, file: File) {
 		return
 	}

-	write_value(w, &Header{
+	write_value(w, Header{
 		magic_number = MAGIC_NUMBER,
 		version = LATEST_VERSION,
 		internal_node_count = u32le(len(file.nodes)),

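The max to min change above is the substantive fix: name lengths must be clamped down to 255, whereas max(len(name), 255) would have inflated short names and sliced past the end of the string. A tiny sketch of the intended behaviour, with hypothetical values:

```odin
package main

import "core:fmt"

main :: proc() {
	name := "position"              // 8 bytes
	name_len := min(len(name), 255) // 8: short names keep their real length
	// With the old code, max(len(name), 255) would have been 255 here,
	// and name[:255] would slice far past the end of the 8-byte string.
	fmt.println(name[:name_len])
}
```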
+ 6 - 5
core/encoding/json/marshal.odin

@@ -8,17 +8,18 @@ import "core:strings"
 import "core:io"
 import "core:io"
 
 
 Marshal_Data_Error :: enum {
 Marshal_Data_Error :: enum {
+	None,
 	Unsupported_Type,
 	Unsupported_Type,
 }
 }
 
 
-Marshal_Error :: union {
+Marshal_Error :: union #shared_nil {
 	Marshal_Data_Error,
 	Marshal_Data_Error,
 	io.Error,
 	io.Error,
 }
 }
 
 
 marshal :: proc(v: any, allocator := context.allocator) -> (data: []byte, err: Marshal_Error) {
 marshal :: proc(v: any, allocator := context.allocator) -> (data: []byte, err: Marshal_Error) {
 	b := strings.make_builder(allocator)
 	b := strings.make_builder(allocator)
-	defer if err != .None {
+	defer if err != nil {
 		strings.destroy_builder(&b)
 		strings.destroy_builder(&b)
 	}
 	}
 
 
@@ -27,7 +28,7 @@ marshal :: proc(v: any, allocator := context.allocator) -> (data: []byte, err: M
 	if len(b.buf) != 0 {
 	if len(b.buf) != 0 {
 		data = b.buf[:]
 		data = b.buf[:]
 	}
 	}
-	return data, .None
+	return data, nil
 }
 }
 
 
 marshal_to_builder :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
 marshal_to_builder :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
@@ -285,8 +286,8 @@ marshal_to_writer :: proc(w: io.Writer, v: any) -> (err: Marshal_Error) {
 			case runtime.Type_Info_Integer:
 			case runtime.Type_Info_Integer:
 				switch info.endianness {
 				switch info.endianness {
 				case .Platform: return false
 				case .Platform: return false
-				case .Little:   return ODIN_ENDIAN != "little"
-				case .Big:      return ODIN_ENDIAN != "big"
+				case .Little:   return ODIN_ENDIAN != .Little
+				case .Big:      return ODIN_ENDIAN != .Big
 				}
 				}
 			}
 			}
 			return false
 			return false

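A minimal sketch of how the #shared_nil change reads at the call site: callers can now compare the union error directly against nil. The import path is the usual core:encoding/json one implied by the file path above.

```odin
package main

import "core:encoding/json"
import "core:fmt"

Point :: struct {
	x, y: int,
}

main :: proc() {
	data, err := json.marshal(Point{1, 2})
	if err != nil { // works because Marshal_Error is now a #shared_nil union
		return
	}
	defer delete(data)
	fmt.println(string(data))
}
```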
+ 6 - 0
core/encoding/json/parser.odin

@@ -354,6 +354,12 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a

 	b := bytes_make(len(s) + 2*utf8.UTF_MAX, 1, allocator) or_return
 	w := copy(b, s[0:i])
+
+	if len(b) == 0 && allocator.data == nil {
+		// `unmarshal_count_array` calls us with a nil allocator
+		return string(b[:w]), nil
+	}
+
 	loop: for i < len(s) {
 		c := s[i]
 		switch {

+ 25 - 25
core/encoding/json/unmarshal.odin

@@ -52,11 +52,11 @@ unmarshal_any :: proc(data: []byte, v: any, spec := DEFAULT_SPECIFICATION, alloc
 	if p.spec == .MJSON {
 	if p.spec == .MJSON {
 		#partial switch p.curr_token.kind {
 		#partial switch p.curr_token.kind {
 		case .Ident, .String:
 		case .Ident, .String:
-			return unmarsal_object(&p, data, .EOF)
+			return unmarshal_object(&p, data, .EOF)
 		}
 		}
 	}
 	}
 
 
-	return unmarsal_value(&p, data)
+	return unmarshal_value(&p, data)
 }
 }
 
 
 
 
@@ -148,7 +148,7 @@ assign_float :: proc(val: any, f: $T) -> bool {
 
 
 
 
 @(private)
 @(private)
-unmarsal_string :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Info) -> bool {
+unmarshal_string_token :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Info) -> bool {
 	val := val
 	val := val
 	switch dst in &val {
 	switch dst in &val {
 	case string:
 	case string:
@@ -198,7 +198,7 @@ unmarsal_string :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Inf
 
 
 
 
 @(private)
 @(private)
-unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
+unmarshal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
 	UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
 	token := p.curr_token
 	token := p.curr_token
 	
 	
@@ -257,7 +257,7 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	case .Ident:
 	case .Ident:
 		advance_token(p)
 		advance_token(p)
 		if p.spec == .MJSON {
 		if p.spec == .MJSON {
-			if unmarsal_string(p, any{v.data, ti.id}, token.text, ti) {
+			if unmarshal_string_token(p, any{v.data, ti.id}, token.text, ti) {
 				return nil
 				return nil
 			}
 			}
 		}
 		}
@@ -266,7 +266,7 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	case .String:
 	case .String:
 		advance_token(p)
 		advance_token(p)
 		str := unquote_string(token, p.spec, p.allocator) or_return
 		str := unquote_string(token, p.spec, p.allocator) or_return
-		if unmarsal_string(p, any{v.data, ti.id}, str, ti) {
+		if unmarshal_string_token(p, any{v.data, ti.id}, str, ti) {
 			return nil
 			return nil
 		}
 		}
 		delete(str, p.allocator)
 		delete(str, p.allocator)
@@ -274,10 +274,10 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 
 
 
 
 	case .Open_Brace:
 	case .Open_Brace:
-		return unmarsal_object(p, v, .Close_Brace)
+		return unmarshal_object(p, v, .Close_Brace)
 
 
 	case .Open_Bracket:
 	case .Open_Bracket:
-		return unmarsal_array(p, v)
+		return unmarshal_array(p, v)
 
 
 	case:
 	case:
 		if p.spec != .JSON {
 		if p.spec != .JSON {
@@ -312,16 +312,16 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 
 
 
 
 @(private)
 @(private)
-unmarsal_expect_token :: proc(p: ^Parser, kind: Token_Kind, loc := #caller_location) -> Token {
+unmarshal_expect_token :: proc(p: ^Parser, kind: Token_Kind, loc := #caller_location) -> Token {
 	prev := p.curr_token
 	prev := p.curr_token
 	err := expect_token(p, kind)
 	err := expect_token(p, kind)
-	assert(err == nil, "unmarsal_expect_token")
+	assert(err == nil, "unmarshal_expect_token")
 	return prev
 	return prev
 }
 }
 
 
 
 
 @(private)
 @(private)
-unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unmarshal_Error) {
+unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unmarshal_Error) {
 	UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
 	UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
 	
 	
 	if end_token == .Close_Brace {
 	if end_token == .Close_Brace {
@@ -342,7 +342,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 			key, _ := parse_object_key(p, p.allocator)
 			key, _ := parse_object_key(p, p.allocator)
 			defer delete(key, p.allocator)
 			defer delete(key, p.allocator)
 			
 			
-			unmarsal_expect_token(p, .Colon)						
+			unmarshal_expect_token(p, .Colon)						
 			
 			
 			fields := reflect.struct_fields_zipped(ti.id)
 			fields := reflect.struct_fields_zipped(ti.id)
 			
 			
@@ -378,7 +378,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 				
 				
 				field_ptr := rawptr(uintptr(v.data) + offset)
 				field_ptr := rawptr(uintptr(v.data) + offset)
 				field := any{field_ptr, type.id}
 				field := any{field_ptr, type.id}
-				unmarsal_value(p, field) or_return
+				unmarshal_value(p, field) or_return
 					
 					
 				if parse_comma(p) {
 				if parse_comma(p) {
 					break struct_loop
 					break struct_loop
@@ -407,11 +407,11 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 		
 		
 		map_loop: for p.curr_token.kind != end_token {
 		map_loop: for p.curr_token.kind != end_token {
 			key, _ := parse_object_key(p, p.allocator)
 			key, _ := parse_object_key(p, p.allocator)
-			unmarsal_expect_token(p, .Colon)
+			unmarshal_expect_token(p, .Colon)
 			
 			
 			
 			
 			mem.zero_slice(elem_backing)
 			mem.zero_slice(elem_backing)
-			if err := unmarsal_value(p, map_backing_value); err != nil {
+			if err := unmarshal_value(p, map_backing_value); err != nil {
 				delete(key, p.allocator)
 				delete(key, p.allocator)
 				return err
 				return err
 			}
 			}
@@ -443,7 +443,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 	
 	
 		enumerated_array_loop: for p.curr_token.kind != end_token {
 		enumerated_array_loop: for p.curr_token.kind != end_token {
 			key, _ := parse_object_key(p, p.allocator)
 			key, _ := parse_object_key(p, p.allocator)
-			unmarsal_expect_token(p, .Colon)
+			unmarshal_expect_token(p, .Colon)
 			defer delete(key, p.allocator)
 			defer delete(key, p.allocator)
 
 
 			index := -1
 			index := -1
@@ -460,7 +460,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 			index_ptr := rawptr(uintptr(v.data) + uintptr(index*t.elem_size))
 			index_ptr := rawptr(uintptr(v.data) + uintptr(index*t.elem_size))
 			index_any := any{index_ptr, t.elem.id}
 			index_any := any{index_ptr, t.elem.id}
 			
 			
-			unmarsal_value(p, index_any) or_return
+			unmarshal_value(p, index_any) or_return
 			
 			
 			if parse_comma(p) {
 			if parse_comma(p) {
 				break enumerated_array_loop
 				break enumerated_array_loop
@@ -480,10 +480,10 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
 
 
 
 
 @(private)
 @(private)
-unmarsal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
+unmarshal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
 	p_backup := p^
 	p_backup := p^
 	p.allocator = mem.nil_allocator()
 	p.allocator = mem.nil_allocator()
-	unmarsal_expect_token(p, .Open_Bracket)
+	unmarshal_expect_token(p, .Open_Bracket)
 	array_length_loop: for p.curr_token.kind != .Close_Bracket {
 	array_length_loop: for p.curr_token.kind != .Close_Bracket {
 		_, _ = parse_value(p)
 		_, _ = parse_value(p)
 		length += 1
 		length += 1
@@ -497,9 +497,9 @@ unmarsal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
 }
 }
 
 
 @(private)
 @(private)
-unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
+unmarshal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	assign_array :: proc(p: ^Parser, base: rawptr, elem: ^reflect.Type_Info, length: uintptr) -> Unmarshal_Error {
 	assign_array :: proc(p: ^Parser, base: rawptr, elem: ^reflect.Type_Info, length: uintptr) -> Unmarshal_Error {
-		unmarsal_expect_token(p, .Open_Bracket)
+		unmarshal_expect_token(p, .Open_Bracket)
 		
 		
 		for idx: uintptr = 0; p.curr_token.kind != .Close_Bracket; idx += 1 {
 		for idx: uintptr = 0; p.curr_token.kind != .Close_Bracket; idx += 1 {
 			assert(idx < length)
 			assert(idx < length)
@@ -507,14 +507,14 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 			elem_ptr := rawptr(uintptr(base) + idx*uintptr(elem.size))
 			elem_ptr := rawptr(uintptr(base) + idx*uintptr(elem.size))
 			elem := any{elem_ptr, elem.id}
 			elem := any{elem_ptr, elem.id}
 			
 			
-			unmarsal_value(p, elem) or_return
+			unmarshal_value(p, elem) or_return
 			
 			
 			if parse_comma(p) {
 			if parse_comma(p) {
 				break
 				break
 			}	
 			}	
 		}
 		}
 		
 		
-		unmarsal_expect_token(p, .Close_Bracket)
+		unmarshal_expect_token(p, .Close_Bracket)
 		
 		
 		
 		
 		return nil
 		return nil
@@ -524,7 +524,7 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	
 	
 	ti := reflect.type_info_base(type_info_of(v.id))
 	ti := reflect.type_info_base(type_info_of(v.id))
 	
 	
-	length := unmarsal_count_array(p)
+	length := unmarshal_count_array(p)
 	
 	
 	#partial switch t in ti.variant {
 	#partial switch t in ti.variant {
 	case reflect.Type_Info_Slice:	
 	case reflect.Type_Info_Slice:	
@@ -578,4 +578,4 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
 	}
 	}
 		
 		
 	return UNSUPPORTED_TYPE
 	return UNSUPPORTED_TYPE
-}
+}

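The renames above (unmarsal_* to unmarshal_*) are internal; they back the package's public unmarshal entry point. A small usage sketch, with the public signature assumed rather than shown in the hunks:

```odin
package main

import "core:encoding/json"
import "core:fmt"

Config :: struct {
	name:  string,
	count: int,
}

main :: proc() {
	data := `{"name": "odin", "count": 3}`

	cfg: Config
	// Unmarshal_Error is a union, so nil comparison works as in the hunks above.
	if err := json.unmarshal(transmute([]byte)(data), &cfg); err != nil {
		return
	}
	fmt.println(cfg.name, cfg.count)
}
```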
+ 28 - 0
core/encoding/varint/doc.odin

@@ -0,0 +1,28 @@
+/*
+	Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
+
+	Author of this Odin package: Jeroen van Rijn
+
+	Example:
+	```odin
+	import "core:encoding/varint"
+	import "core:fmt"
+
+	main :: proc() {
+		buf: [varint.LEB128_MAX_BYTES]u8
+
+		value := u128(42)
+
+		encode_size, encode_err := varint.encode_uleb128(buf[:], value)
+		assert(encode_size == 1 && encode_err == .None)
+
+		fmt.printf("Encoded as %v\n", buf[:encode_size])
+		decoded_val, decode_size, decode_err := varint.decode_uleb128(buf[:])
+
+		assert(decoded_val == value && decode_size == encode_size && decode_err == .None)
+		fmt.printf("Decoded as %v, using %v byte%v\n", decoded_val, decode_size, "" if decode_size == 1 else "s")
+	}
+	```
+
+*/
+package varint

+ 165 - 0
core/encoding/varint/leb128.odin

@@ -0,0 +1,165 @@
+/*
+	Copyright 2022 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+*/
+
+// package varint implements variable length integer encoding and decoding using
+// the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
+package varint
+
+// In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
+// Instead we'll set limits on the values we'll encode/decode
+// 18 * 7 bits = 126, which means that a possible 19th byte may at most be `0b0000_0011`.
+LEB128_MAX_BYTES    :: 19
+
+Error :: enum {
+	None             = 0,
+	Buffer_Too_Small = 1,
+	Value_Too_Large  = 2,
+}
+
+// Decode a slice of bytes encoding an unsigned LEB128 integer into value and number of bytes used.
+// Returns `size` == 0 for an invalid value, empty slice, or a varint > 18 bytes.
+decode_uleb128_buffer :: proc(buf: []u8) -> (val: u128, size: int, err: Error) {
+	if len(buf) == 0 {
+		return 0, 0, .Buffer_Too_Small
+	}
+
+	for v in buf {
+		val, size, err = decode_uleb128_byte(v, size, val)
+		if err != .Buffer_Too_Small {
+			return
+		}
+	}
+
+	if err == .Buffer_Too_Small {
+		val, size = 0, 0
+	}
+	return
+}
+
+// Decodes an unsigned LEB128 integer into value a byte at a time.
+// Returns `.None` when decoded properly, `.Value_Too_Large` when the value
+// exceeds the limits of a u128, and `.Buffer_Too_Small` when it's not yet fully decoded.
+decode_uleb128_byte :: proc(input: u8, offset: int, accumulator: u128) -> (val: u128, size: int, err: Error) {
+	size = offset + 1
+
+	// 18 * 7 bits = 126, which means that a possible 19th byte may at most be 0b0000_0011.
+	if size > LEB128_MAX_BYTES || size == LEB128_MAX_BYTES && input > 0b0000_0011 {
+		return 0, 0, .Value_Too_Large
+	}
+
+	val = accumulator | u128(input & 0x7f) << uint(offset * 7)
+
+	if input < 128 {
+		// We're done
+		return
+	}
+
+	// If the buffer runs out before the number ends, return an error.
+	return val, size, .Buffer_Too_Small
+}
+decode_uleb128 :: proc {decode_uleb128_buffer, decode_uleb128_byte}
+
+// Decode a slice of bytes encoding a signed LEB128 integer into value and number of bytes used.
+// Returns `size` == 0 for an invalid value, empty slice, or a varint > 18 bytes.
+decode_ileb128_buffer :: proc(buf: []u8) -> (val: i128, size: int, err: Error) {
+	if len(buf) == 0 {
+		return 0, 0, .Buffer_Too_Small
+	}
+
+	for v in buf {
+		val, size, err = decode_ileb128_byte(v, size, val)
+		if err != .Buffer_Too_Small {
+			return
+		}
+	}
+
+	if err == .Buffer_Too_Small {
+		val, size = 0, 0
+	}
+	return
+}
+
+// Decode a signed LEB128 integer into value and number of bytes used, one byte at a time.
+// Returns `size` == 0 for an invalid value, empty slice, or a varint > 18 bytes.
+decode_ileb128_byte :: proc(input: u8, offset: int, accumulator: i128) -> (val: i128, size: int, err: Error) {
+	size = offset + 1
+	shift := uint(offset * 7)
+
+	// 18 * 7 bits = 126, which including sign means we can have a 19th byte.
+	if size > LEB128_MAX_BYTES || size == LEB128_MAX_BYTES && input > 0x7f {
+		return 0, 0, .Value_Too_Large
+	}
+
+	val = accumulator | i128(input & 0x7f) << shift
+
+	if input < 128 {
+		if input & 0x40 == 0x40 {
+			val |= max(i128) << (shift + 7)
+		}
+		return val, size, .None
+	}
+	return val, size, .Buffer_Too_Small
+}
+decode_ileb128 :: proc{decode_ileb128_buffer, decode_ileb128_byte}
+
+// Encode `val` into `buf` as an unsigned LEB128 encoded series of bytes.
+// `buf` must be appropriately sized.
+encode_uleb128 :: proc(buf: []u8, val: u128) -> (size: int, err: Error) {
+	val := val
+
+	for {
+		size += 1
+
+		if size > len(buf) {
+			return 0, .Buffer_Too_Small
+		}
+
+		low := val & 0x7f
+		val >>= 7
+
+		if val > 0 {
+			low |= 0x80 // more bytes to follow
+		}
+		buf[size - 1] = u8(low)
+
+		if val == 0 { break }
+	}
+	return
+}
+
+@(private)
+SIGN_MASK :: (i128(1) << 121) // sign extend mask
+
+// Encode `val` into `buf` as a signed LEB128 encoded series of bytes.
+// `buf` must be appropriately sized.
+encode_ileb128 :: proc(buf: []u8, val: i128) -> (size: int, err: Error) {
+	val      := val
+	more     := true
+
+	for more {
+		size += 1
+
+		if size > len(buf) {
+			return 0, .Buffer_Too_Small
+		}
+
+		low := val & 0x7f
+		val >>= 7
+
+		low = (low ~ SIGN_MASK) - SIGN_MASK
+
+		if (val == 0 && low & 0x40 != 0x40) || (val == -1 && low & 0x40 == 0x40) {
+			more = false
+		} else {
+			low |= 0x80
+		}
+
+		buf[size - 1] = u8(low)
+	}
+	return
+}

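The package doc above only demonstrates the unsigned path; a matching sketch for the signed encoder and decoder added here, using the same buffer-size constant (the value is chosen purely for illustration):

```odin
package main

import "core:encoding/varint"
import "core:fmt"

main :: proc() {
	buf: [varint.LEB128_MAX_BYTES]u8

	value := i128(-123456)

	size, err := varint.encode_ileb128(buf[:], value)
	assert(err == .None)

	decoded, decoded_size, decode_err := varint.decode_ileb128(buf[:size])
	assert(decode_err == .None && decoded == value && decoded_size == size)

	fmt.printf("%v encodes to %v byte(s): %v\n", value, size, buf[:size])
}
```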
+ 1 - 1
core/fmt/doc.odin

@@ -64,6 +64,7 @@ If not present, the width is whatever is necessary to represent the value.
 Precision is specified after the (optional) width followed by a period followed by a decimal number.
 If no period is present, a default precision is used.
 A period with no following number specifies a precision of 0.
+
 Examples:
 	%f     default width, default precision
 	%8f    width 8, default precision
@@ -84,7 +85,6 @@ Other flags:
 	               add leading 0z for dozenal (%#z)
 	               add leading 0x or 0X for hexadecimal (%#x or %#X)
 	               remove leading 0x for %p (%#p)
-
 	' '    (space) leave a space for elided sign in numbers (% d)
 	0      pad with leading zeros rather than spaces


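To make the width/precision and flag notation documented above concrete, a tiny sketch:

```odin
package main

import "core:fmt"

main :: proc() {
	fmt.printf("%f\n",    3.14159) // default width, default precision
	fmt.printf("%8.3f\n", 3.14159) // width 8, precision 3: "   3.142"
	fmt.printf("%#x\n",   255)     // '#' adds the leading 0x: "0xff"
}
```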
File diff suppressed because it is too large
+ 191 - 160
core/fmt/fmt.odin


+ 6 - 1
core/fmt/fmt_js.odin

@@ -34,11 +34,16 @@ stderr := io.Writer{
 	},
 }

-// print* procedures return the number of bytes written
+// print formats using the default print settings and writes to stdout
 print   :: proc(args: ..any, sep := " ") -> int { return wprint(w=stdout, args=args, sep=sep) }
+// println formats using the default print settings and writes to stdout
 println :: proc(args: ..any, sep := " ") -> int { return wprintln(w=stdout, args=args, sep=sep) }
+// printf formats according to the specified format string and writes to stdout
 printf  :: proc(fmt: string, args: ..any) -> int { return wprintf(stdout, fmt, ..args) }

+// eprint formats using the default print settings and writes to stderr
 eprint   :: proc(args: ..any, sep := " ") -> int { return wprint(w=stderr, args=args, sep=sep) }
+// eprintln formats using the default print settings and writes to stderr
 eprintln :: proc(args: ..any, sep := " ") -> int { return wprintln(w=stderr, args=args, sep=sep) }
+// eprintf formats according to the specified format string and writes to stderr
 eprintf  :: proc(fmt: string, args: ..any) -> int { return wprintf(stderr, fmt, ..args) }

+ 9 - 1
core/fmt/fmt_os.odin

@@ -5,15 +5,18 @@ import "core:runtime"
 import "core:os"
 import "core:os"
 import "core:io"
 import "core:io"
 
 
+// fprint formats using the default print settings and writes to fd
 fprint :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
 fprint :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
 	w := io.to_writer(os.stream_from_handle(fd))
 	w := io.to_writer(os.stream_from_handle(fd))
 	return wprint(w=w, args=args, sep=sep)
 	return wprint(w=w, args=args, sep=sep)
 }
 }
 
 
+// fprintln formats using the default print settings and writes to fd
 fprintln :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
 fprintln :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
 	w := io.to_writer(os.stream_from_handle(fd))
 	w := io.to_writer(os.stream_from_handle(fd))
 	return wprintln(w=w, args=args, sep=sep)
 	return wprintln(w=w, args=args, sep=sep)
 }
 }
+// fprintf formats according to the specififed format string and writes to fd
 fprintf :: proc(fd: os.Handle, fmt: string, args: ..any) -> int {
 fprintf :: proc(fd: os.Handle, fmt: string, args: ..any) -> int {
 	w := io.to_writer(os.stream_from_handle(fd))
 	w := io.to_writer(os.stream_from_handle(fd))
 	return wprintf(w, fmt, ..args)
 	return wprintf(w, fmt, ..args)
@@ -27,11 +30,16 @@ fprint_typeid :: proc(fd: os.Handle, id: typeid) -> (n: int, err: io.Error) {
 	return wprint_typeid(w, id)
 	return wprint_typeid(w, id)
 }
 }
 
 
-// print* procedures return the number of bytes written
+// print formats using the default print settings and writes to os.stdout
 print   :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stdout, args=args, sep=sep) }
 print   :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stdout, args=args, sep=sep) }
+// println formats using the default print settings and writes to os.stdout
 println :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stdout, args=args, sep=sep) }
 println :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stdout, args=args, sep=sep) }
+// printf formats according to the specififed format string and writes to os.stdout
 printf  :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stdout, fmt, ..args) }
 printf  :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stdout, fmt, ..args) }
 
 
+// eprint formats using the default print settings and writes to os.stderr
 eprint   :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stderr, args=args, sep=sep) }
 eprint   :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stderr, args=args, sep=sep) }
+// eprintln formats using the default print settings and writes to os.stderr
 eprintln :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stderr, args=args, sep=sep) }
 eprintln :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stderr, args=args, sep=sep) }
+// eprintf formats according to the specififed format string and writes to os.stderr
 eprintf  :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stderr, fmt, ..args) }
 eprintf  :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stderr, fmt, ..args) }

+ 18 - 1
core/hash/hash.odin

@@ -55,6 +55,23 @@ djb2 :: proc(data: []byte, seed := u32(5381)) -> u32 {
 	return hash
 }

+djbx33a :: proc(data: []byte, seed := u32(5381)) -> (result: [16]byte) #no_bounds_check {
+	state := [4]u32{seed, seed, seed, seed}
+	
+	s: u32 = 0
+	for p in data {
+		state[s] = (state[s] << 5) + state[s] + u32(p) // hash * 33 + u32(b)
+		s = (s + 1) & 3
+	}
+	
+	
+	(^u32le)(&result[0])^  = u32le(state[0])
+	(^u32le)(&result[4])^  = u32le(state[1])
+	(^u32le)(&result[8])^  = u32le(state[2])
+	(^u32le)(&result[12])^ = u32le(state[3])
+	return
+}
+
 @(optimization_mode="speed")
 fnv32 :: proc(data: []byte, seed := u32(0x811c9dc5)) -> u32 {
 	h: u32 = seed
@@ -134,7 +151,7 @@ murmur32 :: proc(data: []byte, seed := u32(0)) -> u32 {
 		k1 ~= u32(tail[2]) << 16
 		fallthrough
 	case 2:
-		k1 ~= u32(tail[2]) << 8
+		k1 ~= u32(tail[1]) << 8
 		fallthrough
 	case 1:
 		k1 ~= u32(tail[0])

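A quick sketch of calling the new djbx33a above; it runs four djb2-style lanes over the input and packs them little-endian into 16 bytes:

```odin
package main

import "core:fmt"
import "core:hash"

main :: proc() {
	msg := "hello, world"
	digest := hash.djbx33a(transmute([]byte)(msg)) // default seed 5381
	fmt.printf("%x\n", digest)                     // 16-byte result, 4 x u32le lanes
}
```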
+ 1 - 1
core/hash/xxhash/streaming.odin

@@ -96,7 +96,7 @@ XXH3_128_canonical_from_hash :: proc(hash: XXH128_hash_t) -> (canonical: XXH128_
 	#assert(size_of(XXH128_canonical) == size_of(XXH128_hash_t))

 	t := hash
-	when ODIN_ENDIAN == "little" {
+	when ODIN_ENDIAN == .Little {
 		t.high = byte_swap(t.high)
 		t.low  = byte_swap(t.low)
 	}

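The change above tracks ODIN_ENDIAN becoming an enum constant rather than a string; a minimal sketch of the new comparison style:

```odin
package main

import "core:fmt"

main :: proc() {
	// ODIN_ENDIAN is now compared against .Little / .Big instead of "little" / "big".
	when ODIN_ENDIAN == .Little {
		fmt.println("little-endian target")
	} else {
		fmt.println("big-endian target")
	}
}
```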
+ 829 - 20
core/image/common.odin

@@ -6,6 +6,8 @@
 		Jeroen van Rijn: Initial implementation, optimization.
 		Jeroen van Rijn: Initial implementation, optimization.
 		Ginger Bill:     Cosmetic changes.
 		Ginger Bill:     Cosmetic changes.
 */
 */
+
+// package image implements a general 2D image library to be used with other image related packages
 package image
 package image
 
 
 import "core:bytes"
 import "core:bytes"
@@ -13,6 +15,32 @@ import "core:mem"
 import "core:compress"
 import "core:compress"
 import "core:runtime"
 import "core:runtime"
 
 
+/*
+	67_108_864 pixels max by default.
+
+	For QOI, the Worst case scenario means all pixels will be encoded as RGBA literals, costing 5 bytes each.
+	This caps memory usage at 320 MiB.
+
+	The tunable is limited to 4_294_836_225 pixels maximum, or 4 GiB per 8-bit channel.
+	It is not advised to tune it this large.
+
+	The 64 Megapixel default is considered to be a decent upper bound you won't run into in practice,
+	except in very specific circumstances.
+
+*/
+MAX_DIMENSIONS :: min(#config(MAX_DIMENSIONS, 8192 * 8192), 65535 * 65535)
+
+// Color
+RGB_Pixel     :: [3]u8
+RGBA_Pixel    :: [4]u8
+RGB_Pixel_16  :: [3]u16
+RGBA_Pixel_16 :: [4]u16
+// Grayscale
+G_Pixel       :: [1]u8
+GA_Pixel      :: [2]u8
+G_Pixel_16    :: [1]u16
+GA_Pixel_16   :: [2]u16
+
 Image :: struct {
 Image :: struct {
 	width:         int,
 	width:         int,
 	height:        int,
 	height:        int,
@@ -24,15 +52,17 @@ Image :: struct {
 		For convenience, we return them as u16 so we don't need to switch on the type
 		For convenience, we return them as u16 so we don't need to switch on the type
 		in our viewer, and can just test against nil.
 		in our viewer, and can just test against nil.
 	*/
 	*/
-	background:    Maybe([3]u16),
-
+	background:    Maybe(RGB_Pixel_16),
 	metadata:      Image_Metadata,
 	metadata:      Image_Metadata,
 }
 }
 
 
 Image_Metadata :: union {
 Image_Metadata :: union {
 	^PNG_Info,
 	^PNG_Info,
+	^QOI_Info,
 }
 }
 
 
+
+
 /*
 /*
 	IMPORTANT: `.do_not_expand_*` options currently skip handling of the `alpha_*` options,
 	IMPORTANT: `.do_not_expand_*` options currently skip handling of the `alpha_*` options,
 		therefore Gray+Alpha will be returned as such even if you add `.alpha_drop_if_present`,
 		therefore Gray+Alpha will be returned as such even if you add `.alpha_drop_if_present`,
@@ -44,13 +74,13 @@ Image_Metadata :: union {
 /*
 /*
 Image_Option:
 Image_Option:
 	`.info`
 	`.info`
-		This option behaves as `.return_ihdr` and `.do_not_decompress_image` and can be used
+		This option behaves as `.return_metadata` and `.do_not_decompress_image` and can be used
 		to gather an image's dimensions and color information.
 		to gather an image's dimensions and color information.
 
 
 	`.return_header`
 	`.return_header`
-		Fill out img.sidecar.header with the image's format-specific header struct.
+		Fill out img.metadata.header with the image's format-specific header struct.
 		If we only care about the image specs, we can set `.return_header` +
 		If we only care about the image specs, we can set `.return_header` +
-		`.do_not_decompress_image`, or `.info`, which works as if both of these were set.
+		`.do_not_decompress_image`, or `.info`.
 
 
 	`.return_metadata`
 	`.return_metadata`
 		Returns all chunks not needed to decode the data.
 		Returns all chunks not needed to decode the data.
@@ -86,7 +116,7 @@ Image_Option:
 
 
 	`.alpha_premultiply`
 	`.alpha_premultiply`
 		If the image has an alpha channel, returns image data as follows:
 		If the image has an alpha channel, returns image data as follows:
-			RGB  *= A, Gray = Gray *= A
+			RGB *= A, Gray = Gray *= A
 
 
 	`.blend_background`
 	`.blend_background`
 		If a bKGD chunk is present in a PNG, we normally just set `img.background`
 		If a bKGD chunk is present in a PNG, we normally just set `img.background`
@@ -101,24 +131,29 @@ Image_Option:
 */
 */
 
 
 Option :: enum {
 Option :: enum {
+	// LOAD OPTIONS
 	info = 0,
 	info = 0,
 	do_not_decompress_image,
 	do_not_decompress_image,
 	return_header,
 	return_header,
 	return_metadata,
 	return_metadata,
-	alpha_add_if_missing,
-	alpha_drop_if_present,
-	alpha_premultiply,
-	blend_background,
+	alpha_add_if_missing,          // Ignored for QOI. Always returns RGBA8.
+	alpha_drop_if_present,         // Unimplemented for QOI. Returns error.
+	alpha_premultiply,             // Unimplemented for QOI. Returns error.
+	blend_background,              // Ignored for non-PNG formats
 	// Unimplemented
 	// Unimplemented
 	do_not_expand_grayscale,
 	do_not_expand_grayscale,
 	do_not_expand_indexed,
 	do_not_expand_indexed,
 	do_not_expand_channels,
 	do_not_expand_channels,
+
+	// SAVE OPTIONS
+	qoi_all_channels_linear,       // QOI, informative info. If not set, defaults to sRGB with linear alpha.
 }
 }
 Options :: distinct bit_set[Option]
 Options :: distinct bit_set[Option]
 
 
-Error :: union {
+Error :: union #shared_nil {
 	General_Image_Error,
 	General_Image_Error,
 	PNG_Error,
 	PNG_Error,
+	QOI_Error,
 
 
 	compress.Error,
 	compress.Error,
 	compress.General_Error,
 	compress.General_Error,
@@ -132,9 +167,15 @@ General_Image_Error :: enum {
 	Invalid_Image_Dimensions,
 	Image_Dimensions_Too_Large,
 	Image_Does_Not_Adhere_to_Spec,
+	Invalid_Input_Image,
+	Invalid_Output,
 }

+/*
+	PNG-specific definitions
+*/
 PNG_Error :: enum {
+	None = 0,
 	Invalid_PNG_Signature,
 	IHDR_Not_First_Chunk,
 	IHDR_Corrupt,
@@ -144,7 +185,9 @@ PNG_Error :: enum {
 	IDAT_Size_Too_Large,
 	PLTE_Encountered_Unexpectedly,
 	PLTE_Invalid_Length,
+	PLTE_Missing,
 	TRNS_Encountered_Unexpectedly,
+	TNRS_Invalid_Length,
 	BKGD_Invalid_Length,
 	Unknown_Color_Type,
 	Invalid_Color_Bit_Depth_Combo,
@@ -155,9 +198,6 @@ PNG_Error :: enum {
 	Invalid_Chunk_Length,
 }

-/*
-	PNG-specific structs
-*/
 PNG_Info :: struct {
 	header: PNG_IHDR,
 	chunks: [dynamic]PNG_Chunk,
@@ -220,7 +260,7 @@ PNG_Chunk_Type :: enum u32be {

 	*/
 	iDOT = 'i' << 24 | 'D' << 16 | 'O' << 8 | 'T',
-	CbGI = 'C' << 24 | 'b' << 16 | 'H' << 8 | 'I',
+	CgBI = 'C' << 24 | 'g' << 16 | 'B' << 8 | 'I',
 }

 PNG_IHDR :: struct #packed {
@@ -248,16 +288,58 @@ PNG_Interlace_Method :: enum u8 {
 }

 /*
-	Functions to help with image buffer calculations
+	QOI-specific definitions
 */
+QOI_Error :: enum {
+	None = 0,
+	Invalid_QOI_Signature,
+	Invalid_Number_Of_Channels, // QOI allows 3 or 4 channel data.
+	Invalid_Bit_Depth,          // QOI supports only 8-bit images, error only returned from writer.
+	Invalid_Color_Space,        // QOI allows 0 = sRGB or 1 = linear.
+	Corrupt,                    // More data than pixels to decode into, for example.
+	Missing_Or_Corrupt_Trailer, // Image seemed to have decoded okay, but trailer is missing or corrupt.
+}
+
+QOI_Magic :: u32be(0x716f6966)      // "qoif"
+
+QOI_Color_Space :: enum u8 {
+	sRGB   = 0,
+	Linear = 1,
+}
+
+QOI_Header :: struct #packed {
+	magic:       u32be,
+	width:       u32be,
+	height:      u32be,
+	channels:    u8,
+	color_space: QOI_Color_Space,
+}
+#assert(size_of(QOI_Header) == 14)
+
+QOI_Info :: struct {
+	header: QOI_Header,
+}
+
+TGA_Header :: struct #packed {
+	id_length:        u8,
+	color_map_type:   u8,
+	data_type_code:   u8,
+	color_map_origin: u16le,
+	color_map_length: u16le,
+	color_map_depth:  u8,
+	origin:           [2]u16le,
+	dimensions:       [2]u16le,
+	bits_per_pixel:   u8,
+	image_descriptor: u8,
+}
+#assert(size_of(TGA_Header) == 18)
+
+// Function to help with image buffer calculations
 compute_buffer_size :: proc(width, height, channels, depth: int, extra_row_bytes := int(0)) -> (size: int) {
 	size = ((((channels * width * depth) + 7) >> 3) + extra_row_bytes) * height
 	return
 }

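For illustration of the row rounding (the `+ 7 >> 3` turns a row's bit count into whole bytes): a 640 x 480 RGBA image at 8 bits per channel gives ((4 * 640 * 8) + 7) >> 3 = 2560 bytes per row, and 2560 * 480 = 1,228,800 bytes in total, while a 10-pixel wide, single-channel, 1-bit row rounds up to ((1 * 10 * 1) + 7) >> 3 = 2 bytes.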
-/*
-	For when you have an RGB(A) image, but want a particular channel.
-*/
 Channel :: enum u8 {
 	R = 1,
 	G = 2,
@@ -265,7 +347,13 @@ Channel :: enum u8 {
 	A = 4,
 }

+// When you have an RGB(A) image, but want a particular channel.
 return_single_channel :: proc(img: ^Image, channel: Channel) -> (res: ^Image, ok: bool) {
+	// Were we actually given a valid image?
+	if img == nil {
+		return nil, false
+	}
+
 	ok = false
 	t: bytes.Buffer

@@ -295,7 +383,7 @@ return_single_channel :: proc(img: ^Image, channel: Channel) -> (res: ^Image, ok
 			o = o[1:]
 		}
 	case 16:
-		buffer_size := compute_buffer_size(img.width, img.height, 2, 8)
+		buffer_size := compute_buffer_size(img.width, img.height, 1, 16)
 		t = bytes.Buffer{}
 		resize(&t.buf, buffer_size)

@@ -323,3 +411,724 @@ return_single_channel :: proc(img: ^Image, channel: Channel) -> (res: ^Image, ok

 	return res, true
 }
+
+// Does the image have 1 or 2 channels, a valid bit depth (8 or 16),
+// Is the pointer valid, are the dimensions valid?
+is_valid_grayscale_image :: proc(img: ^Image) -> (ok: bool) {
+	// Were we actually given a valid image?
+	if img == nil {
+		return false
+	}
+
+	// Are we a Gray or Gray + Alpha image?
+	if img.channels != 1 && img.channels != 2 {
+		return false
+	}
+
+	// Do we have an acceptable bit depth?
+	if img.depth != 8 && img.depth != 16 {
+		return false
+	}
+
+	// This returns 0 if any of the inputs is zero.
+	bytes_expected := compute_buffer_size(img.width, img.height, img.channels, img.depth)
+
+	// If the dimensions are invalid or the buffer size doesn't match the image characteristics, bail.
+	if bytes_expected == 0 || bytes_expected != len(img.pixels.buf) || img.width * img.height > MAX_DIMENSIONS {
+		return false
+	}
+
+	return true
+}
+
+// Does the image have 3 or 4 channels, a valid bit depth (8 or 16),
+// Is the pointer valid, are the dimensions valid?
+is_valid_color_image :: proc(img: ^Image) -> (ok: bool) {
+	// Were we actually given a valid image?
+	if img == nil {
+		return false
+	}
+
+	// Are we an RGB or RGBA image?
+	if img.channels != 3 && img.channels != 4 {
+		return false
+	}
+
+	// Do we have an acceptable bit depth?
+	if img.depth != 8 && img.depth != 16 {
+		return false
+	}
+
+	// This returns 0 if any of the inputs is zero.
+	bytes_expected := compute_buffer_size(img.width, img.height, img.channels, img.depth)
+
+	// If the dimensions are invalid or the buffer size doesn't match the image characteristics, bail.
+	if bytes_expected == 0 || bytes_expected != len(img.pixels.buf) || img.width * img.height > MAX_DIMENSIONS {
+		return false
+	}
+
+	return true
+}
+
+// Does the image have 1..4 channels, a valid bit depth (8 or 16),
+// Is the pointer valid, are the dimensions valid?
+is_valid_image :: proc(img: ^Image) -> (ok: bool) {
+	// Were we actually given a valid image?
+	if img == nil {
+		return false
+	}
+
+	return is_valid_color_image(img) || is_valid_grayscale_image(img)
+}
+
+Alpha_Key :: union {
+	GA_Pixel,
+	RGBA_Pixel,
+	GA_Pixel_16,
+	RGBA_Pixel_16,
+}
+
+/*
+	Add alpha channel if missing, in-place.
+
+	Expects 1..4 channels (Gray, Gray + Alpha, RGB, RGBA).
+	Any other number of channels will be considered an error, returning `false` without modifying the image.
+	If the input image already has an alpha channel, it'll return `true` early (without considering optional keyed alpha).
+
+	If an image doesn't already have an alpha channel:
+	If the optional `alpha_key` is provided, it will be resolved as follows:
+		- For RGB,  if pix = key.rgb -> pix = {0, 0, 0, key.a}
+		- For Gray, if pix = key.r  -> pix = {0, key.g}
+	Otherwise, an opaque alpha channel will be added.
+*/
+alpha_add_if_missing :: proc(img: ^Image, alpha_key := Alpha_Key{}, allocator := context.allocator) -> (ok: bool) {
+	context.allocator = allocator
+
+	if !is_valid_image(img) {
+		return false
+	}
+
+	// We should now have a valid Image with 1..4 channels. Do we already have alpha?
+	if img.channels == 2 || img.channels == 4 {
+		// We're done.
+		return true
+	}
+
+	channels     := img.channels + 1
+	bytes_wanted := compute_buffer_size(img.width, img.height, channels, img.depth)
+
+	buf := bytes.Buffer{}
+
+	// Can we allocate the return buffer?
+	if !resize(&buf.buf, bytes_wanted) {
+		delete(buf.buf)
+		return false
+	}
+
+	switch img.depth {
+	case 8:
+		switch channels {
+		case 2:
+			// Turn Gray into Gray + Alpha
+			inp := mem.slice_data_cast([]G_Pixel,  img.pixels.buf[:])
+			out := mem.slice_data_cast([]GA_Pixel, buf.buf[:])
+
+			if key, key_ok := alpha_key.(GA_Pixel); key_ok {
+				// We have keyed alpha.
+				o: GA_Pixel
+				for p in inp {
+					if p == key.r {
+						o = GA_Pixel{0, key.g}
+					} else {
+						o = GA_Pixel{p.r, 255}
+					}
+					out[0] = o
+					out = out[1:]
+				}
+			} else {
+				// No keyed alpha, just make all pixels opaque.
+				o := GA_Pixel{0, 255}
+				for p in inp {
+					o.r    = p.r
+					out[0] = o
+					out = out[1:]
+				}
+			}
+
+		case 4:
+			// Turn RGB into RGBA
+			inp := mem.slice_data_cast([]RGB_Pixel,  img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGBA_Pixel, buf.buf[:])
+
+			if key, key_ok := alpha_key.(RGBA_Pixel); key_ok {
+				// We have keyed alpha.
+				o: RGBA_Pixel
+				for p in inp {
+					if p == key.rgb {
+						o = RGBA_Pixel{0, 0, 0, key.a}
+					} else {
+						o = RGBA_Pixel{p.r, p.g, p.b, 255}
+					}
+					out[0] = o
+					out = out[1:]
+				}
+			} else {
+				// No keyed alpha, just make all pixels opaque.
+				o := RGBA_Pixel{0, 0, 0, 255}
+				for p in inp {
+					o.rgb  = p
+					out[0] = o
+					out = out[1:]
+				}
+			}
+		case:
+			// We shouldn't get here.
+			unreachable()
+		}
+	case 16:
+		switch channels {
+		case 2:
+			// Turn Gray into Gray + Alpha
+			inp := mem.slice_data_cast([]G_Pixel_16,  img.pixels.buf[:])
+			out := mem.slice_data_cast([]GA_Pixel_16, buf.buf[:])
+
+			if key, key_ok := alpha_key.(GA_Pixel_16); key_ok {
+				// We have keyed alpha.
+				o: GA_Pixel_16
+				for p in inp {
+					if p == key.r {
+						o = GA_Pixel_16{0, key.g}
+					} else {
+						o = GA_Pixel_16{p.r, 65535}
+					}
+					out[0] = o
+					out = out[1:]
+				}
+			} else {
+				// No keyed alpha, just make all pixels opaque.
+				o := GA_Pixel_16{0, 65535}
+				for p in inp {
+					o.r    = p.r
+					out[0] = o
+					out = out[1:]
+				}
+			}
+
+		case 4:
+			// Turn RGB into RGBA
+			inp := mem.slice_data_cast([]RGB_Pixel_16,  img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGBA_Pixel_16, buf.buf[:])
+
+			if key, key_ok := alpha_key.(RGBA_Pixel_16); key_ok {
+				// We have keyed alpha.
+				o: RGBA_Pixel_16
+				for p in inp {
+					if p == key.rgb {
+						o = RGBA_Pixel_16{0, 0, 0, key.a}
+					} else {
+						o = RGBA_Pixel_16{p.r, p.g, p.b, 65535}
+					}
+					out[0] = o
+					out = out[1:]
+				}
+			} else {
+				// No keyed alpha, just make all pixels opaque.
+				o := RGBA_Pixel_16{0, 0, 0, 65535}
+				for p in inp {
+					o.rgb  = p
+					out[0] = o
+					out = out[1:]
+				}
+			}
+		case:
+			// We shouldn't get here.
+			unreachable()
+		}
+	}
+
+	// If we got here, that means we've now got a buffer with the alpha channel added.
+	// Destroy the old pixel buffer and replace it with the new one, and update the channel count.
+	bytes.buffer_destroy(&img.pixels)
+	img.pixels   = buf
+	img.channels = channels
+	return true
+}
+alpha_apply_keyed_alpha :: alpha_add_if_missing
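A minimal usage sketch, assuming the `core:image` and `core:image/png` import paths and a hypothetical file name — promote an 8-bit RGB image to RGBA in place, treating magenta as a fully transparent key:

	package main

	import "core:image"
	import "core:image/png"

	main :: proc() {
		img, err := png.load("sprite.png")
		if err != nil { return }
		defer png.destroy(img)

		// Pixels equal to the key's RGB become {0, 0, 0, key.a}; everything else gets an opaque alpha.
		key: image.Alpha_Key = image.RGBA_Pixel{255, 0, 255, 0}
		if image.alpha_add_if_missing(img, key) {
			// img.channels is now 4.
		}
	}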
+
+/*
+	Drop alpha channel if present, in-place.
+
+	Expects 1..4 channels (Gray, Gray + Alpha, RGB, RGBA).
+	Any other number of channels will be considered an error, returning `false` without modifying the image.
+
+	Of the `options`, the following are considered:
+	`.alpha_premultiply`
+		If the image has an alpha channel, returns image data as follows:
+			RGB *= A, Gray = Gray *= A
+
+	`.blend_background`
+		If `img.background` is set, it'll be blended in like this:
+			RGB = (1 - A) * Background + A * RGB
+
+	If an image has 1 (Gray) or 3 (RGB) channels, it'll return early without modifying the image,
+	with one exception: `alpha_key` and `img.background` are present, and `.blend_background` is set.
+
+	In this case a keyed alpha pixel will be replaced with the background color.
+*/
+alpha_drop_if_present :: proc(img: ^Image, options := Options{}, alpha_key := Alpha_Key{}, allocator := context.allocator) -> (ok: bool) {
+	context.allocator = allocator
+
+	if !is_valid_image(img) {
+		return false
+	}
+
+	// Do we have a background to blend?
+	will_it_blend := false
+	switch v in img.background {
+	case RGB_Pixel_16: will_it_blend = true if .blend_background in options else false
+	}
+
+	// Do we have keyed alpha?
+	keyed := false
+	switch v in alpha_key {
+	case GA_Pixel:      keyed = true if img.channels == 1 && img.depth ==  8 else false
+	case RGBA_Pixel:    keyed = true if img.channels == 3 && img.depth ==  8 else false
+	case GA_Pixel_16:   keyed = true if img.channels == 1 && img.depth == 16 else false
+	case RGBA_Pixel_16: keyed = true if img.channels == 3 && img.depth == 16 else false
+	}
+
+	// We should now have a valid Image with 1..4 channels. Do we have alpha?
+	if img.channels == 1 || img.channels == 3 {
+		if !(will_it_blend && keyed) {
+			// We're done
+			return true
+		}
+	}
+
+	// # of destination channels
+	channels := 1 if img.channels < 3 else 3
+
+	bytes_wanted := compute_buffer_size(img.width, img.height, channels, img.depth)
+	buf := bytes.Buffer{}
+
+	// Can we allocate the return buffer?
+	if !resize(&buf.buf, bytes_wanted) {
+		delete(buf.buf)
+		return false
+	}
+
+	switch img.depth {
+	case 8:
+		switch img.channels {
+		case 1: // Gray to Gray, but we should have keyed alpha + background.
+			inp := mem.slice_data_cast([]G_Pixel, img.pixels.buf[:])
+			out := mem.slice_data_cast([]G_Pixel, buf.buf[:])
+
+			key := alpha_key.(GA_Pixel).r
+			bg  := G_Pixel{}
+			if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+				// Background is RGB 16-bit, take just the red channel's topmost byte.
+				bg = u8(temp_bg.r >> 8)
+			}
+
+			for p in inp {
+				out[0] = bg if p == key else p
+				out    = out[1:]
+			}
+
+		case 2: // Gray + Alpha to Gray, no keyed alpha but we can have a background.
+			inp := mem.slice_data_cast([]GA_Pixel, img.pixels.buf[:])
+			out := mem.slice_data_cast([]G_Pixel,  buf.buf[:])
+
+			if will_it_blend {
+				// Blend with background "color", then drop alpha.
+				bg  := f32(0.0)
+				if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+					// Background is RGB 16-bit, take just the red channel's topmost byte.
+					bg = f32(temp_bg.r >> 8)
+				}
+
+				for p in inp {
+					a := f32(p.g) / 255.0
+					c := ((1.0 - a) * bg + a * f32(p.r))
+					out[0] = u8(c)
+					out    = out[1:]
+				}
+
+			} else if .alpha_premultiply in options {
+				// Premultiply component with alpha, then drop alpha.
+				for p in inp {
+					a := f32(p.g) / 255.0
+					c := f32(p.r) * a
+					out[0] = u8(c)
+					out    = out[1:]
+				}
+			} else {
+				// Just drop alpha on the floor.
+				for p in inp {
+					out[0] = p.r
+					out    = out[1:]
+				}
+			}
+
+		case 3: // RGB to RGB, but we should have keyed alpha + background.
+			inp := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGB_Pixel, buf.buf[:])
+
+			key := alpha_key.(RGBA_Pixel)
+			bg  := RGB_Pixel{}
+			if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+				// Background is RGB 16-bit, squash down to 8 bits.
+				bg = {u8(temp_bg.r >> 8), u8(temp_bg.g >> 8), u8(temp_bg.b >> 8)}
+			}
+
+			for p in inp {
+				out[0] = bg if p == key.rgb else p
+				out    = out[1:]
+			}
+
+		case 4: // RGBA to RGB, no keyed alpha but we can have a background or need to premultiply.
+			inp := mem.slice_data_cast([]RGBA_Pixel, img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGB_Pixel,  buf.buf[:])
+
+			if will_it_blend {
+				// Blend with background "color", then drop alpha.
+				bg := [3]f32{}
+				if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+					// Background is RGB 16-bit, take just the red channel's topmost byte.
+					bg = {f32(temp_bg.r >> 8), f32(temp_bg.g >> 8), f32(temp_bg.b >> 8)}
+				}
+
+				for p in inp {
+					a   := f32(p.a) / 255.0
+					rgb := [3]f32{f32(p.r), f32(p.g), f32(p.b)}
+					c   := ((1.0 - a) * bg + a * rgb)
+
+					out[0] = {u8(c.r), u8(c.g), u8(c.b)}
+					out    = out[1:]
+				}
+
+			} else if .alpha_premultiply in options {
+				// Premultiply component with alpha, then drop alpha.
+				for p in inp {
+					a   := f32(p.a) / 255.0
+					rgb := [3]f32{f32(p.r), f32(p.g), f32(p.b)}
+					c   := rgb * a
+
+					out[0] = {u8(c.r), u8(c.g), u8(c.b)}
+					out    = out[1:]
+				}
+			} else {
+				// Just drop alpha on the floor.
+				for p in inp {
+					out[0] = p.rgb
+					out    = out[1:]
+				}
+			}
+		}
+
+	case 16:
+		switch img.channels {
+		case 1: // Gray to Gray, but we should have keyed alpha + background.
+			inp := mem.slice_data_cast([]G_Pixel_16, img.pixels.buf[:])
+			out := mem.slice_data_cast([]G_Pixel_16, buf.buf[:])
+
+			key := alpha_key.(GA_Pixel_16).r
+			bg  := G_Pixel_16{}
+			if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+				// Background is RGB 16-bit, take just the red channel.
+				bg = temp_bg.r
+			}
+
+			for p in inp {
+				out[0] = bg if p == key else p
+				out    = out[1:]
+			}
+
+		case 2: // Gray + Alpha to Gray, no keyed alpha but we can have a background.
+			inp := mem.slice_data_cast([]GA_Pixel_16, img.pixels.buf[:])
+			out := mem.slice_data_cast([]G_Pixel_16,  buf.buf[:])
+
+			if will_it_blend {
+				// Blend with background "color", then drop alpha.
+				bg  := f32(0.0)
+				if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+					// Background is RGB 16-bit, take just the red channel.
+					bg = f32(temp_bg.r)
+				}
+
+				for p in inp {
+					a := f32(p.g) / 65535.0
+					c := ((1.0 - a) * bg + a * f32(p.r))
+					out[0] = u16(c)
+					out    = out[1:]
+				}
+
+			} else if .alpha_premultiply in options {
+				// Premultiply component with alpha, then drop alpha.
+				for p in inp {
+					a := f32(p.g) / 65535.0
+					c := f32(p.r) * a
+					out[0] = u16(c)
+					out    = out[1:]
+				}
+			} else {
+				// Just drop alpha on the floor.
+				for p in inp {
+					out[0] = p.r
+					out    = out[1:]
+				}
+			}
+
+		case 3: // RGB to RGB, but we should have keyed alpha + background.
+			inp := mem.slice_data_cast([]RGB_Pixel_16, img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGB_Pixel_16, buf.buf[:])
+
+			key := alpha_key.(RGBA_Pixel_16)
+			bg  := img.background.(RGB_Pixel_16)
+
+			for p in inp {
+				out[0] = bg if p == key.rgb else p
+				out    = out[1:]
+			}
+
+		case 4: // RGBA to RGB, no keyed alpha but we can have a background or need to premultiply.
+			inp := mem.slice_data_cast([]RGBA_Pixel_16, img.pixels.buf[:])
+			out := mem.slice_data_cast([]RGB_Pixel_16,  buf.buf[:])
+
+			if will_it_blend {
+				// Blend with background "color", then drop alpha.
+				bg := [3]f32{}
+				if temp_bg, temp_bg_ok := img.background.(RGB_Pixel_16); temp_bg_ok {
+					// Background is RGB 16-bit, convert to [3]f32 to blend.
+					bg = {f32(temp_bg.r), f32(temp_bg.g), f32(temp_bg.b)}
+				}
+
+				for p in inp {
+					a   := f32(p.a) / 65535.0
+					rgb := [3]f32{f32(p.r), f32(p.g), f32(p.b)}
+					c   := ((1.0 - a) * bg + a * rgb)
+
+					out[0] = {u16(c.r), u16(c.g), u16(c.b)}
+					out    = out[1:]
+				}
+
+			} else if .alpha_premultiply in options {
+				// Premultiply component with alpha, then drop alpha.
+				for p in inp {
+					a   := f32(p.a) / 65535.0
+					rgb := [3]f32{f32(p.r), f32(p.g), f32(p.b)}
+					c   := rgb * a
+
+					out[0] = {u16(c.r), u16(c.g), u16(c.b)}
+					out    = out[1:]
+				}
+			} else {
+				// Just drop alpha on the floor.
+				for p in inp {
+					out[0] = p.rgb
+					out    = out[1:]
+				}
+			}
+		}
+
+	case:
+		unreachable()
+	}
+
+	// If we got here, that means we've now got a buffer with the alpha channel dropped.
+	// Destroy the old pixel buffer and replace it with the new one, and update the channel count.
+	bytes.buffer_destroy(&img.pixels)
+	img.pixels   = buf
+	img.channels = channels
+	return true
+}
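A sketch of the premultiply-and-drop path, under the same assumptions (hypothetical file name):

	package main

	import "core:image"
	import "core:image/png"

	main :: proc() {
		img, err := png.load("logo.png")
		if err != nil { return }
		defer png.destroy(img)

		// Multiplies color by alpha, then drops the alpha channel (4 -> 3 or 2 -> 1 channels).
		if image.alpha_drop_if_present(img, {.alpha_premultiply}) {
			// img.channels is now 1 or 3.
		}
	}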
+
+// Apply palette to 8-bit single-channel image and return an 8-bit RGB image, in-place.
+// If the image given is not a valid 8-bit single channel image, the procedure will return `false` early.
+apply_palette_rgb :: proc(img: ^Image, palette: [256]RGB_Pixel, allocator := context.allocator) -> (ok: bool) {
+	context.allocator = allocator
+
+	if img == nil || img.channels != 1 || img.depth != 8 {
+		return false
+	}
+
+	bytes_expected := compute_buffer_size(img.width, img.height, 1, 8)
+	if bytes_expected == 0 || bytes_expected != len(img.pixels.buf) || img.width * img.height > MAX_DIMENSIONS {
+		return false
+	}
+
+	// Can we allocate the return buffer?
+	buf := bytes.Buffer{}
+	bytes_wanted := compute_buffer_size(img.width, img.height, 3, 8)
+	if !resize(&buf.buf, bytes_wanted) {
+		delete(buf.buf)
+		return false
+	}
+
+	out := mem.slice_data_cast([]RGB_Pixel, buf.buf[:])
+
+	// Apply the palette
+	for p, i in img.pixels.buf {
+		out[i] = palette[p]
+	}
+
+	// If we got here, that means we've now got a buffer with the palette applied.
+	// Destroy the old pixel buffer and replace it with the new one, and update the channel count.
+	bytes.buffer_destroy(&img.pixels)
+	img.pixels   = buf
+	img.channels = 3
+	return true
+}
+
+// Apply palette to 8-bit single-channel image and return an 8-bit RGBA image, in-place.
+// If the image given is not a valid 8-bit single channel image, the procedure will return `false` early.
+apply_palette_rgba :: proc(img: ^Image, palette: [256]RGBA_Pixel, allocator := context.allocator) -> (ok: bool) {
+	context.allocator = allocator
+
+	if img == nil || img.channels != 1 || img.depth != 8 {
+		return false
+	}
+
+	bytes_expected := compute_buffer_size(img.width, img.height, 1, 8)
+	if bytes_expected == 0 || bytes_expected != len(img.pixels.buf) || img.width * img.height > MAX_DIMENSIONS {
+		return false
+	}
+
+	// Can we allocate the return buffer?
+	buf := bytes.Buffer{}
+	bytes_wanted := compute_buffer_size(img.width, img.height, 4, 8)
+	if !resize(&buf.buf, bytes_wanted) {
+		delete(buf.buf)
+		return false
+	}
+
+	out := mem.slice_data_cast([]RGBA_Pixel, buf.buf[:])
+
+	// Apply the palette
+	for p, i in img.pixels.buf {
+		out[i] = palette[p]
+	}
+
+	// If we got here, that means we've now got a buffer with the palette applied.
+	// Destroy the old pixel buffer and replace it with the new one, and update the channel count.
+	bytes.buffer_destroy(&img.pixels)
+	img.pixels   = buf
+	img.channels = 4
+	return true
+}
+apply_palette :: proc{apply_palette_rgb, apply_palette_rgba}
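A sketch of applying a palette — here a grayscale ramp — to a valid 8-bit, single-channel image (for example a paletted PNG loaded with `.do_not_expand_indexed`); the package name is arbitrary:

	package imageutil

	import "core:image"

	// Expands `img` from 1 channel to 3 by looking every index up in a 256-entry ramp.
	colorize :: proc(img: ^image.Image) -> bool {
		palette: [256]image.RGB_Pixel
		for i in 0..<256 {
			palette[i] = {u8(i), u8(i), u8(i)}
		}
		return image.apply_palette(img, palette)
	}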
+
+
+// Replicates grayscale values into RGB(A) 8- or 16-bit images as appropriate.
+// Returns early with `false` if already an RGB(A) image.
+expand_grayscale :: proc(img: ^Image, allocator := context.allocator) -> (ok: bool) {
+	context.allocator = allocator
+
+	if !is_valid_grayscale_image(img) {
+		return false
+	}
+
+	// We should have 1 or 2 channels of 8- or 16 bits now. We need to turn that into 3 or 4.
+	// Can we allocate the return buffer?
+	buf := bytes.Buffer{}
+	bytes_wanted := compute_buffer_size(img.width, img.height, img.channels + 2, img.depth)
+	if !resize(&buf.buf, bytes_wanted) {
+		delete(buf.buf)
+		return false
+	}
+
+	switch img.depth {
+		case 8:
+			switch img.channels {
+			case 1: // Turn Gray into RGB
+				out := mem.slice_data_cast([]RGB_Pixel, buf.buf[:])
+
+				for p in img.pixels.buf {
+					out[0] = p // Broadcast gray value into RGB components.
+					out    = out[1:]
+				}
+
+			case 2: // Turn Gray + Alpha into RGBA
+				inp := mem.slice_data_cast([]GA_Pixel,   img.pixels.buf[:])
+				out := mem.slice_data_cast([]RGBA_Pixel, buf.buf[:])
+
+				for p in inp {
+					out[0].rgb = p.r // Gray component.
+					out[0].a   = p.g // Alpha component.
+					out        = out[1:]
+				}
+
+			case:
+				unreachable()
+			}
+
+		case 16:
+			switch img.channels {
+			case 1: // Turn Gray into RGB
+				inp := mem.slice_data_cast([]u16, img.pixels.buf[:])
+				out := mem.slice_data_cast([]RGB_Pixel_16, buf.buf[:])
+
+				for p in inp {
+					out[0] = p // Broadcast gray value into RGB components.
+					out    = out[1:]
+				}
+
+			case 2: // Turn Gray + Alpha into RGBA
+				inp := mem.slice_data_cast([]GA_Pixel_16,   img.pixels.buf[:])
+				out := mem.slice_data_cast([]RGBA_Pixel_16, buf.buf[:])
+
+				for p in inp {
+					out[0].rgb = p.r // Gray component.
+					out[0].a   = p.g // Alpha component.
+					out        = out[1:]
+				}
+
+			case:
+				unreachable()
+			}
+
+		case:
+			unreachable()
+	}
+
+
+	// If we got here, that means we've now got a buffer with the grayscale values expanded into color (and alpha) channels.
+	// Destroy the old pixel buffer and replace it with the new one, and update the channel count.
+	bytes.buffer_destroy(&img.pixels)
+	img.pixels   = buf
+	img.channels += 2
+	return true
+}
+
+/*
+	Helper functions to read and write data from/to a Context, etc.
+*/
+@(optimization_mode="speed")
+read_data :: proc(z: $C, $T: typeid) -> (res: T, err: compress.General_Error) {
+	if r, e := compress.read_data(z, T); e != .None {
+		return {}, .Stream_Too_Short
+	} else {
+		return r, nil
+	}
+}
+
+@(optimization_mode="speed")
+read_u8 :: proc(z: $C) -> (res: u8, err: compress.General_Error) {
+	if r, e := compress.read_u8(z); e != .None {
+		return {}, .Stream_Too_Short
+	} else {
+		return r, nil
+	}
+}
+
+write_bytes :: proc(buf: ^bytes.Buffer, data: []u8) -> (err: compress.General_Error) {
+	if len(data) == 0 {
+		return nil
+	} else if len(data) == 1 {
+		if bytes.buffer_write_byte(buf, data[0]) != nil {
+			return compress.General_Error.Resize_Failed
+		}
+	} else if n, _ := bytes.buffer_write(buf, data); n != len(data) {
+		return compress.General_Error.Resize_Failed
+	}
+	return nil
+}

+ 2 - 2
core/image/png/example.odin

@@ -189,7 +189,7 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
 	img := image

 	// PBM 16-bit images are big endian
-	when ODIN_ENDIAN == "little" {
+	when ODIN_ENDIAN == .Little {
 		if img.depth == 16 {
 			// The pixel components are in Big Endian. Let's byteswap back.
 			input  := mem.slice_data_cast([]u16,   img.pixels.buf[:])
@@ -207,7 +207,7 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
 	}

 	mode: int = 0
-	when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+	when ODIN_OS == .Linux || ODIN_OS == .Darwin {
 		// NOTE(justasd): 644 (owner read, write; group read; others read)
 		mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
 	}

+ 10 - 11
core/image/png/helpers.odin

@@ -242,17 +242,16 @@ srgb :: proc(c: image.PNG_Chunk) -> (res: sRGB, ok: bool) {
 }

 plte :: proc(c: image.PNG_Chunk) -> (res: PLTE, ok: bool) {
-	if c.header.type != .PLTE {
+	if c.header.type != .PLTE || c.header.length % 3 != 0 || c.header.length > 768 {
 		return {}, false
 	}

-	i := 0; j := 0; ok = true
-	for j < int(c.header.length) {
-		res.entries[i] = {c.data[j], c.data[j+1], c.data[j+2]}
-		i += 1; j += 3
+	plte := mem.slice_data_cast([]image.RGB_Pixel, c.data[:])
+	for color, i in plte {
+		res.entries[i] = color
 	}
-	res.used = u16(i)
-	return
+	res.used = u16(len(plte))
+	return res, true
 }

 splt :: proc(c: image.PNG_Chunk) -> (res: sPLT, ok: bool) {
@@ -439,18 +438,18 @@ when false {
 		flags: int = O_WRONLY|O_CREATE|O_TRUNC

 		if len(image.pixels) == 0 || len(image.pixels) < image.width * image.height * int(image.channels) {
-			return E_PNG.Invalid_Image_Dimensions
+			return .Invalid_Image_Dimensions
 		}

 		mode: int = 0
-		when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+		when ODIN_OS == .Linux || ODIN_OS == .Darwin {
 			// NOTE(justasd): 644 (owner read, write; group read; others read)
 			mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
 		}

 		fd, fderr := open(filename, flags, mode)
 		if fderr != 0 {
-			return E_General.Cannot_Open_File
+			return .Cannot_Open_File
 		}
 		defer close(fd)

@@ -473,7 +472,7 @@ when false {
 		case 3: ihdr.color_type = Color_Type{.Color}
 		case 4: ihdr.color_type = Color_Type{.Color, .Alpha}
 		case:// Unhandled
-			return E_PNG.Unknown_Color_Type
+			return .Unknown_Color_Type
 		}
 		h := make_chunk(ihdr, .IHDR)
 		write_chunk(fd, h)

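A sketch of pulling the palette out of the returned metadata with the reworked `plte` helper, assuming a paletted input (hypothetical file name) and that the loader recorded a PLTE chunk in `info.chunks` for this load:

	package main

	import "core:fmt"
	import "core:image"
	import "core:image/png"

	main :: proc() {
		img, err := png.load("paletted.png", {.return_metadata})
		if err != nil { return }
		defer png.destroy(img)

		if info, ok := img.metadata.(^image.PNG_Info); ok {
			for c in info.chunks {
				if c.header.type != .PLTE { continue }
				if palette, palette_ok := png.plte(c); palette_ok {
					fmt.println("palette entries:", palette.used)
				}
			}
		}
	}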
+ 45 - 31
core/image/png/png.odin

@@ -6,6 +6,11 @@
 		Jeroen van Rijn: Initial implementation.
 		Ginger Bill:     Cosmetic changes.
 */
+
+
+// package png implements a PNG image reader
+//
+// The PNG specification is at https://www.w3.org/TR/PNG/.
 package png

 import "core:compress"
@@ -20,16 +25,10 @@ import "core:io"
 import "core:mem"
 import "core:mem"
 import "core:intrinsics"
 import "core:intrinsics"
 
 
-/*
-	67_108_864 pixels max by default.
-	Maximum allowed dimensions are capped at 65535 * 65535.
-*/
-MAX_DIMENSIONS    :: min(#config(PNG_MAX_DIMENSIONS, 8192 * 8192), 65535 * 65535)
+// Limit chunk sizes.
+// By default: IDAT = 8k x 8k x 16-bits + 8k filter bytes.
+// The total number of pixels defaults to 64 Megapixel and can be tuned in image/common.odin.
 
-/*
-	Limit chunk sizes.
-		By default: IDAT = 8k x 8k x 16-bits + 8k filter bytes.
-*/
 _MAX_IDAT_DEFAULT :: ( 8192 /* Width */ *  8192 /* Height */ * 2 /* 16-bit */) +  8192 /* Filter bytes */
 _MAX_IDAT         :: (65535 /* Width */ * 65535 /* Height */ * 2 /* 16-bit */) + 65535 /* Filter bytes */

@@ -59,7 +58,7 @@ Row_Filter :: enum u8 {
 	Paeth   = 4,
 }

-PLTE_Entry    :: [3]u8
+PLTE_Entry :: image.RGB_Pixel

 PLTE :: struct #packed {
 	entries: [256]PLTE_Entry,
@@ -254,7 +253,7 @@ read_header :: proc(ctx: ^$C) -> (image.PNG_IHDR, Error) {
 	header := (^image.PNG_IHDR)(raw_data(c.data))^
 	// Validate IHDR
 	using header
-	if width == 0 || height == 0 || u128(width) * u128(height) > MAX_DIMENSIONS {
+	if width == 0 || height == 0 || u128(width) * u128(height) > image.MAX_DIMENSIONS {
 		return {}, .Invalid_Image_Dimensions
 	}

@@ -361,6 +360,10 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 		options -= {.info}
 	}

+	if .return_header in options && .return_metadata in options {
+		options -= {.return_header}
+	}
+
 	if .alpha_drop_if_present in options && .alpha_add_if_missing in options {
 		return {}, compress.General_Error.Incompatible_Options
 	}
@@ -387,7 +390,7 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a

 	idat_length := u64(0)

-	c:		image.PNG_Chunk
+	c:	image.PNG_Chunk
 	ch:     image.PNG_Chunk_Header
 	e:      io.Error

@@ -468,6 +471,10 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 			}
 			info.header = h

+			if .return_header in options && .return_metadata not_in options && .do_not_decompress_image not_in options {
+				return img, nil
+			}
+
 		case .PLTE:
 			seen_plte = true
 			// PLTE must appear before IDAT and can't appear for color types 0, 4.
@@ -535,9 +542,6 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 			seen_iend = true

 		case .bKGD:
-
-			// TODO: Make sure that 16-bit bKGD + tRNS chunks return u16 instead of u16be
-
 			c = read_chunk(ctx) or_return
 			seen_bkgd = true
 			if .return_metadata in options {
@@ -589,23 +593,36 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 			*/

 			final_image_channels += 1
-
 			seen_trns = true
+
+			if .Paletted in header.color_type {
+				if len(c.data) > 256 {
+					return img, .TNRS_Invalid_Length
+				}
+			} else if .Color in header.color_type {
+				if len(c.data) != 6 {
+					return img, .TNRS_Invalid_Length
+				}
+			} else if len(c.data) != 2 {
+				return img, .TNRS_Invalid_Length
+			}
+
 			if info.header.bit_depth < 8 && .Paletted not_in info.header.color_type {
 				// Rescale tRNS data so key matches intensity
-				dsc := depth_scale_table
+				dsc   := depth_scale_table
 				scale := dsc[info.header.bit_depth]
 				if scale != 1 {
 					key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale)
 					c.data = []u8{0, u8(key & 255)}
 				}
 			}
+
 			trns = c

-		case .iDOT, .CbGI:
+		case .iDOT, .CgBI:
 			/*
 				iPhone PNG bastardization that doesn't adhere to spec with broken IDAT chunk.
-				We're not going to add support for it. If you have the misfortunte of coming
+				We're not going to add support for it. If you have the misfortune of coming
 				across one of these files, use a utility to defry it.
 			*/
 			return img, .Image_Does_Not_Adhere_to_Spec
@@ -630,6 +647,10 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 		return img, .IDAT_Missing
 	}

+	if .Paletted in header.color_type && !seen_plte {
+		return img, .PLTE_Missing
+	}
+
 	/*
 		Calculate the expected output size, to help `inflate` make better decisions about the output buffer.
 		We'll also use it to check the returned buffer size is what we expected it to be.
@@ -678,15 +699,6 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 		return {}, defilter_error
 	}

-	/*
-		Now we'll handle the relocoring of paletted images, handling of tRNS chunks,
-		and we'll expand grayscale images to RGB(A).
-
-		For the sake of convenience we return only RGB(A) images. In the future we
-		may supply an option to return Gray/Gray+Alpha as-is, in which case RGB(A)
-		will become the default.
-	*/
-
 	if .Paletted in header.color_type && .do_not_expand_indexed in options {
 		return img, nil
 	}
@@ -694,7 +706,10 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 		return img, nil
 	}

-
+	/*
+		Now we're going to optionally apply various post-processing stages,
+		to for example expand grayscale, apply a palette, premultiply alpha, etc.
+	*/
 	raw_image_channels := img.channels
 	out_image_channels := 3

@@ -1199,7 +1214,6 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
 	return img, nil
 }

-
 filter_paeth :: #force_inline proc(left, up, up_left: u8) -> u8 {
 	aa, bb, cc := i16(left), i16(up), i16(up_left)
 	p  := aa + bb - cc
@@ -1611,7 +1625,7 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
 			}
 		}
 	}
-	when ODIN_ENDIAN == "little" {
+	when ODIN_ENDIAN == .Little {
 		if img.depth == 16 {
 			// The pixel components are in Big Endian. Let's byteswap.
 			input  := mem.slice_data_cast([]u16be, img.pixels.buf[:])

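As the option docs above describe, `.info` behaves as `.return_metadata` plus `.do_not_decompress_image`; a minimal sketch for probing a PNG's dimensions without decoding it (hypothetical file name):

	package main

	import "core:fmt"
	import "core:image/png"

	main :: proc() {
		img, err := png.load("photo.png", {.info})
		if err != nil { return }
		defer png.destroy(img)

		fmt.println(img.width, "x", img.height, "-", img.channels, "channels at", img.depth, "bits")
	}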
+ 408 - 0
core/image/qoi/qoi.odin

@@ -0,0 +1,408 @@
+/*
+	Copyright 2022 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+*/
+
+
+// package qoi implements a QOI image reader
+//
+// The QOI specification is at https://qoiformat.org.
+package qoi
+
+import "core:mem"
+import "core:image"
+import "core:compress"
+import "core:bytes"
+import "core:os"
+
+Error   :: image.Error
+General :: compress.General_Error
+Image   :: image.Image
+Options :: image.Options
+
+RGB_Pixel  :: image.RGB_Pixel
+RGBA_Pixel :: image.RGBA_Pixel
+
+save_to_memory  :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+	context.allocator = allocator
+
+	if img == nil {
+		return .Invalid_Input_Image
+	}
+
+	if output == nil {
+		return .Invalid_Output
+	}
+
+	pixels := img.width * img.height
+	if pixels == 0 || pixels > image.MAX_DIMENSIONS {
+		return .Invalid_Input_Image
+	}
+
+	// QOI supports only 8-bit images with 3 or 4 channels.
+	if img.depth != 8 || img.channels < 3 || img.channels > 4 {
+		return .Invalid_Input_Image
+	}
+
+	if img.channels * pixels != len(img.pixels.buf) {
+		return .Invalid_Input_Image
+	}
+
+	written := 0
+
+	// Calculate and allocate maximum size. We'll reclaim space to actually written output at the end.
+	max_size := pixels * (img.channels + 1) + size_of(image.QOI_Header) + size_of(u64be)
+
+	if !resize(&output.buf, max_size) {
+		return General.Resize_Failed
+	}
+
+	header := image.QOI_Header{
+		magic       = image.QOI_Magic,
+		width       = u32be(img.width),
+		height      = u32be(img.height),
+		channels    = u8(img.channels),
+		color_space = .Linear if .qoi_all_channels_linear in options else .sRGB,
+	}
+	header_bytes := transmute([size_of(image.QOI_Header)]u8)header
+
+	copy(output.buf[written:], header_bytes[:])
+	written += size_of(image.QOI_Header)
+
+	/*
+		Encode loop starts here.
+	*/
+	seen: [64]RGBA_Pixel
+	pix  := RGBA_Pixel{0, 0, 0, 255}
+	prev := pix
+
+	seen[qoi_hash(pix)] = pix
+
+	input := img.pixels.buf[:]
+	run   := u8(0)
+
+	for len(input) > 0 {
+		if img.channels == 4 {
+			pix     = (^RGBA_Pixel)(raw_data(input))^
+		} else {
+			pix.rgb = (^RGB_Pixel)(raw_data(input))^
+		}
+		input = input[img.channels:]
+
+		if pix == prev {
+			run += 1
+			// As long as the pixel matches the last one, accumulate the run total.
+			// If we reach the max run length or the end of the image, write the run.
+			if run == 62 || len(input) == 0 {
+				// Encode and write run
+				output.buf[written] = u8(QOI_Opcode_Tag.RUN) | (run - 1)
+				written += 1
+				run = 0
+			}
+		} else {
+			if run > 0 {
+				// The pixel differs from the previous one, but we still need to write the pending run.
+				// Encode and write run
+				output.buf[written] = u8(QOI_Opcode_Tag.RUN) | (run - 1)
+				written += 1
+				run = 0
+			}
+
+			index := qoi_hash(pix)
+
+			if seen[index] == pix {
+				// Write indexed pixel
+				output.buf[written] = u8(QOI_Opcode_Tag.INDEX) | index
+				written += 1
+			} else {
+				// Add pixel to index
+				seen[index] = pix
+
+				// If the alpha matches the previous pixel's alpha, we don't need to write a full RGBA literal.
+				if pix.a == prev.a {
+					// Delta
+					d  := pix.rgb - prev.rgb
+
+					// DIFF, biased and modulo 256
+					_d := d + 2
+
+					// LUMA, biased and modulo 256
+					_l := RGB_Pixel{ d.r - d.g + 8, d.g + 32, d.b - d.g + 8 }
+
+					if _d.r < 4 && _d.g < 4 && _d.b < 4 {
+						// Delta is between -2 and 1 inclusive
+						output.buf[written] = u8(QOI_Opcode_Tag.DIFF) | _d.r << 4 | _d.g << 2 | _d.b
+						written += 1
+					} else if _l.r < 16 && _l.g < 64 && _l.b < 16 {
+						// Biased luma is between {-8..7, -32..31, -8..7}
+						output.buf[written    ] = u8(QOI_Opcode_Tag.LUMA) | _l.g
+						output.buf[written + 1] = _l.r << 4 | _l.b
+						written += 2
+					} else {
+						// Write RGB literal
+						output.buf[written] = u8(QOI_Opcode_Tag.RGB)
+						pix_bytes := transmute([4]u8)pix
+						copy(output.buf[written + 1:], pix_bytes[:3])
+						written += 4
+					}
+				} else {
+					// Write RGBA literal
+					output.buf[written] = u8(QOI_Opcode_Tag.RGBA)
+					pix_bytes := transmute([4]u8)pix
+					copy(output.buf[written + 1:], pix_bytes[:])
+					written += 5
+				}
+			}
+		}
+		prev = pix
+	}
+
+	trailer := []u8{0, 0, 0, 0, 0, 0, 0, 1}
+	copy(output.buf[written:], trailer[:])
+	written += len(trailer)
+
+	resize(&output.buf, written)
+	return nil
+}
+
+save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+	context.allocator = allocator
+
+	out := &bytes.Buffer{}
+	defer bytes.buffer_destroy(out)
+
+	save_to_memory(out, img, options) or_return
+	write_ok := os.write_entire_file(output, out.buf[:])
+
+	return nil if write_ok else General.Cannot_Open_File
+}
+
+save :: proc{save_to_memory, save_to_file}
+
+load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+	ctx := &compress.Context_Memory_Input{
+		input_data = slice,
+	}
+
+	img, err = load_from_context(ctx, options, allocator)
+	return img, err
+}
+
+load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+	context.allocator = allocator
+
+	data, ok := os.read_entire_file(filename)
+	defer delete(data)
+
+	if ok {
+		return load_from_slice(data, options)
+	} else {
+		img = new(Image)
+		return img, compress.General_Error.File_Not_Found
+	}
+}
+
+@(optimization_mode="speed")
+load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+	context.allocator = allocator
+	options := options
+
+	if .info in options {
+		options |= {.return_metadata, .do_not_decompress_image}
+		options -= {.info}
+	}
+
+	if .return_header in options && .return_metadata in options {
+		options -= {.return_header}
+	}
+
+	header := image.read_data(ctx, image.QOI_Header) or_return
+	if header.magic != image.QOI_Magic {
+		return img, .Invalid_QOI_Signature
+	}
+
+	if img == nil {
+		img = new(Image)
+	}
+
+	if .return_metadata in options {
+		info := new(image.QOI_Info)
+		info.header  = header
+		img.metadata = info		
+	}
+
+	if header.channels != 3 && header.channels != 4 {
+		return img, .Invalid_Number_Of_Channels
+	}
+
+	if header.color_space != .sRGB && header.color_space != .Linear {
+		return img, .Invalid_Color_Space
+	}
+
+	if header.width == 0 || header.height == 0 {
+		return img, .Invalid_Image_Dimensions
+	}
+
+	total_pixels := header.width * header.height
+	if total_pixels > image.MAX_DIMENSIONS {
+		return img, .Image_Dimensions_Too_Large
+	}
+
+	img.width    = int(header.width)
+	img.height   = int(header.height)
+	img.channels = 4 if .alpha_add_if_missing in options else int(header.channels)
+	img.depth    = 8
+
+	if .do_not_decompress_image in options {
+		img.channels = int(header.channels)
+		return
+	}
+
+	bytes_needed := image.compute_buffer_size(int(header.width), int(header.height), img.channels, 8)
+
+	if !resize(&img.pixels.buf, bytes_needed) {
+	 	return img, mem.Allocator_Error.Out_Of_Memory
+	}
+
+	/*
+		Decode loop starts here.
+	*/
+	seen: [64]RGBA_Pixel
+	pix := RGBA_Pixel{0, 0, 0, 255}
+	seen[qoi_hash(pix)] = pix
+	pixels := img.pixels.buf[:]
+
+	decode: for len(pixels) > 0 {
+		data := image.read_u8(ctx) or_return
+
+		tag := QOI_Opcode_Tag(data)
+		#partial switch tag {
+		case .RGB:
+			pix.rgb = image.read_data(ctx, RGB_Pixel) or_return
+
+			#no_bounds_check {
+				seen[qoi_hash(pix)] = pix	
+			}
+
+		case .RGBA:
+			pix = image.read_data(ctx, RGBA_Pixel) or_return
+
+			#no_bounds_check {
+				seen[qoi_hash(pix)] = pix	
+			}
+
+		case:
+			// 2-bit tag
+			tag = QOI_Opcode_Tag(data & QOI_Opcode_Mask)
+			#partial switch tag {
+				case .INDEX:
+					pix = seen[data & 63]
+
+				case .DIFF:
+					diff_r := ((data >> 4) & 3) - 2
+					diff_g := ((data >> 2) & 3) - 2
+					diff_b := ((data >> 0) & 3) - 2
+
+					pix += {diff_r, diff_g, diff_b, 0}
+
+					#no_bounds_check {
+						seen[qoi_hash(pix)] = pix	
+					}
+
+				case .LUMA:
+					data2 := image.read_u8(ctx) or_return
+
+					diff_g := (data & 63) - 32
+					diff_r := diff_g - 8 + ((data2 >> 4) & 15)
+					diff_b := diff_g - 8 + (data2 & 15)
+
+					pix += {diff_r, diff_g, diff_b, 0}
+
+					#no_bounds_check {
+						seen[qoi_hash(pix)] = pix	
+					}
+
+				case .RUN:
+					if length := int(data & 63) + 1; (length * img.channels) > len(pixels) {
+						return img, .Corrupt
+					} else {
+						#no_bounds_check for in 0..<length {
+							copy(pixels, pix[:img.channels])
+							pixels = pixels[img.channels:]
+						}
+					}
+
+					continue decode
+
+				case:
+					unreachable()
+			}
+		}
+
+		#no_bounds_check {
+			copy(pixels, pix[:img.channels])
+			pixels = pixels[img.channels:]
+		}
+	}
+
+	// The byte stream's end is marked with 7 0x00 bytes followed by a single 0x01 byte.
+	trailer, trailer_err := compress.read_data(ctx, u64be)
+	if trailer_err != nil || trailer != 0x1 {
+		return img, .Missing_Or_Corrupt_Trailer
+	}
+
+	if .alpha_premultiply in options && !image.alpha_drop_if_present(img, options) {
+		return img, .Post_Processing_Error
+	}
+
+	return
+}
+
+load :: proc{load_from_file, load_from_slice, load_from_context}
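A round-trip sketch tying the PNG loader and the new QOI writer together (hypothetical file names; the writer only accepts 8-bit, 3- or 4-channel images, which is what the PNG loader produces for 8-bit sources):

	package main

	import "core:image/png"
	import "core:image/qoi"

	main :: proc() {
		img, err := png.load("input.png")
		if err != nil { return }
		defer png.destroy(img)

		if save_err := qoi.save("output.qoi", img); save_err != nil {
			// Handle .Invalid_Input_Image, .Resize_Failed, .Cannot_Open_File, ...
		}
	}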
+
+/*
+	Cleanup of image-specific data.
+*/
+destroy :: proc(img: ^Image) {
+	if img == nil {
+		/*
+			Nothing to do.
+			Load must've returned with an error.
+		*/
+		return
+	}
+
+	bytes.buffer_destroy(&img.pixels)
+
+	if v, ok := img.metadata.(^image.QOI_Info); ok {
+	 	free(v)
+	}
+	free(img)
+}
+
+QOI_Opcode_Tag :: enum u8 {
+	// 2-bit tags
+	INDEX = 0b0000_0000, // 6-bit index into color array follows
+	DIFF  = 0b0100_0000, // 3x (RGB) 2-bit difference follows (-2..1), bias of 2.
+	LUMA  = 0b1000_0000, // Luma difference
+	RUN   = 0b1100_0000, // Run length encoding, bias -1
+
+	// 8-bit tags
+	RGB   = 0b1111_1110, // Raw RGB  pixel follows
+	RGBA  = 0b1111_1111, // Raw RGBA pixel follows
+}
+
+QOI_Opcode_Mask :: 0b1100_0000
+QOI_Data_Mask   :: 0b0011_1111
+
+qoi_hash :: #force_inline proc(pixel: RGBA_Pixel) -> (index: u8) {
+	i1 := u16(pixel.r) * 3
+	i2 := u16(pixel.g) * 5
+	i3 := u16(pixel.b) * 7
+	i4 := u16(pixel.a) * 11
+
+	return u8((i1 + i2 + i3 + i4) & 63)
+}
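For illustration: the implied starting pixel {0, 0, 0, 255} hashes to (0*3 + 0*5 + 0*7 + 255*11) & 63 = 2805 & 63 = 53, which is why both the encoder and the decoder seed seen[53] with it before processing any data.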

+ 103 - 0
core/image/tga/tga.odin

@@ -0,0 +1,103 @@
+/*
+	Copyright 2022 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+*/
+
+
+// package tga implements a TGA image writer for 8-bit RGB and RGBA images.
+package tga
+
+import "core:mem"
+import "core:image"
+import "core:compress"
+import "core:bytes"
+import "core:os"
+
+Error   :: image.Error
+General :: compress.General_Error
+Image   :: image.Image
+Options :: image.Options
+
+RGB_Pixel  :: image.RGB_Pixel
+RGBA_Pixel :: image.RGBA_Pixel
+
+save_to_memory  :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+	context.allocator = allocator
+
+	if img == nil {
+		return .Invalid_Input_Image
+	}
+
+	if output == nil {
+		return .Invalid_Output
+	}
+
+	pixels := img.width * img.height
+	if pixels == 0 || pixels > image.MAX_DIMENSIONS || img.width > 65535 || img.height > 65535 {
+		return .Invalid_Input_Image
+	}
+
+	// Our TGA writer supports only 8-bit images with 3 or 4 channels.
+	if img.depth != 8 || img.channels < 3 || img.channels > 4 {
+		return .Invalid_Input_Image
+	}
+
+	if img.channels * pixels != len(img.pixels.buf) {
+		return .Invalid_Input_Image
+	}
+
+	written := 0
+
+	// Calculate and allocate necessary space.
+	necessary := pixels * img.channels + size_of(image.TGA_Header)
+
+	if !resize(&output.buf, necessary) {
+		return General.Resize_Failed
+	}
+
+	header := image.TGA_Header{
+		data_type_code   = 0x02, // Color, uncompressed.
+		dimensions       = {u16le(img.width), u16le(img.height)},
+		bits_per_pixel   = u8(img.depth * img.channels),
+		image_descriptor = 1 << 5, // Origin is top left.
+	}
+	header_bytes := transmute([size_of(image.TGA_Header)]u8)header
+
+	copy(output.buf[written:], header_bytes[:])
+	written += size_of(image.TGA_Header)
+
+	/*
+		Encode loop starts here.
+	*/
+	if img.channels == 3 {
+		pix := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
+		out := mem.slice_data_cast([]RGB_Pixel, output.buf[written:])
+		for p, i in pix {
+			out[i] = p.bgr
+		}
+	} else if img.channels == 4 {
+		pix := mem.slice_data_cast([]RGBA_Pixel, img.pixels.buf[:])
+		out := mem.slice_data_cast([]RGBA_Pixel, output.buf[written:])
+		for p, i in pix {
+			out[i] = p.bgra
+		}
+	}
+	return nil
+}
+
+save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
+	context.allocator = allocator
+
+	out := &bytes.Buffer{}
+	defer bytes.buffer_destroy(out)
+
+	save_to_memory(out, img, options) or_return
+	write_ok := os.write_entire_file(output, out.buf[:])
+
+	return nil if write_ok else General.Cannot_Open_File
+}
+
+save :: proc{save_to_memory, save_to_file}
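A sketch that builds a 2x2, 8-bit RGB image by hand and writes it out with the new writer — the field names follow the `image.Image` uses above, and the file name is hypothetical:

	package main

	import "core:bytes"
	import "core:image"
	import "core:image/tga"
	import "core:mem"

	main :: proc() {
		img := new(image.Image)
		defer { bytes.buffer_destroy(&img.pixels); free(img) }

		img.width    = 2
		img.height   = 2
		img.channels = 3
		img.depth    = 8
		if !resize(&img.pixels.buf, 2 * 2 * 3) { return }

		pix := mem.slice_data_cast([]image.RGB_Pixel, img.pixels.buf[:])
		pix[0] = {255, 0, 0}; pix[1] = {0, 255, 0}
		pix[2] = {0, 0, 255}; pix[3] = {255, 255, 255}

		// The writer swizzles to the BGR order TGA expects.
		if err := tga.save("test.tga", img); err != nil {
			// Handle error.
		}
	}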

+ 49 - 68
core/intrinsics/intrinsics.odin

@@ -41,6 +41,8 @@ mem_copy_non_overlapping :: proc(dst, src: rawptr, len: int) ---
 mem_zero                 :: proc(ptr: rawptr, len: int) ---
 mem_zero_volatile        :: proc(ptr: rawptr, len: int) ---

+unaligned_load           :: proc(src: ^$T) -> T ---
+unaligned_store          :: proc(dst: ^$T, val: T) -> T ---

 fixed_point_mul     :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
 fixed_point_div     :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
@@ -60,77 +62,46 @@ syscall :: proc(id: uintptr, args: ..uintptr) -> uintptr ---


 // Atomics
-atomic_fence        :: proc() ---
-atomic_fence_acq    :: proc() ---
-atomic_fence_rel    :: proc() ---
-atomic_fence_acqrel :: proc() ---
+Atomic_Memory_Order :: enum {
+	Relaxed = 0, // Unordered
+	Consume = 1, // Monotonic
+	Acquire = 2,
+	Release = 3,
+	Acq_Rel = 4,
+	Seq_Cst = 5,
+}
 
-atomic_store           :: proc(dst: ^$T, val: T) ---
-atomic_store_rel       :: proc(dst: ^$T, val: T) ---
-atomic_store_relaxed   :: proc(dst: ^$T, val: T) ---
-atomic_store_unordered :: proc(dst: ^$T, val: T) ---
+atomic_type_is_lock_free :: proc($T: typeid) -> bool ---
+
+atomic_thread_fence :: proc(order: Atomic_Memory_Order) ---
+atomic_signal_fence :: proc(order: Atomic_Memory_Order) ---
+
+atomic_store          :: proc(dst: ^$T, val: T) ---
+atomic_store_explicit :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) ---

 atomic_load           :: proc(dst: ^$T) -> T ---
-atomic_load_acq       :: proc(dst: ^$T) -> T ---
-atomic_load_relaxed   :: proc(dst: ^$T) -> T ---
-atomic_load_unordered :: proc(dst: ^$T) -> T ---
-
-atomic_add          :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub          :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_and          :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand         :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_acq     :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_rel     :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_acqrel  :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_relaxed :: proc(dst: ^$T, val: T) -> T ---
-atomic_or           :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_acq       :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_rel       :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_acqrel    :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_relaxed   :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor          :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-
-atomic_xchg         :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_acq     :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_rel     :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_acqrel  :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_relaxed :: proc(dst: ^$T, val: T) -> T ---
-
-atomic_cxchg                    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acq                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_rel                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acqrel             :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_relaxed            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_failrelaxed        :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_failacq            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acq_failrelaxed    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-
-atomic_cxchgweak                    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acq                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_rel                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acqrel             :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_relaxed            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_failrelaxed        :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_failacq            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acq_failrelaxed    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_load_explicit  :: proc(dst: ^$T, order: Atomic_Memory_Order) -> T ---
+
+atomic_add               :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_sub               :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_and               :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_nand              :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_explicit     :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_or                :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_explicit       :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_xor               :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_exchange          :: proc(dst: ^$T, val: T) -> T ---
+atomic_exchange_explicit :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+
+atomic_compare_exchange_strong          :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_strong_explicit :: proc(dst: ^$T, old, new: T, success, failure: Atomic_Memory_Order) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_weak            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_weak_explicit   :: proc(dst: ^$T, old, new: T, success, failure: Atomic_Memory_Order) -> (T, bool) #optional_ok ---
+

 // Constant type tests

@@ -182,6 +153,7 @@ type_is_specialization_of :: proc($T, $S: typeid) -> bool ---
 type_is_variant_of :: proc($U, $V: typeid) -> bool where type_is_union(U) ---

 type_has_field :: proc($T: typeid, $name: string) -> bool ---
+type_field_type :: proc($T: typeid, $name: string) -> typeid ---

 type_proc_parameter_count :: proc($T: typeid) -> int where type_is_proc(T) ---
 type_proc_return_count    :: proc($T: typeid) -> int where type_is_proc(T) ---
@@ -197,3 +169,12 @@ type_field_index_of :: proc($T: typeid, $name: string) -> uintptr ---

 type_equal_proc  :: proc($T: typeid) -> (equal:  proc "contextless" (rawptr, rawptr) -> bool)                 where type_is_comparable(T) ---
 type_hasher_proc :: proc($T: typeid) -> (hasher: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr) where type_is_comparable(T) ---
+
+
+// WASM targets only
+wasm_memory_grow :: proc(index, delta: uintptr) -> int ---
+wasm_memory_size :: proc(index: uintptr)        -> int ---
+
+// Internal compiler use only
+
+__entry_point :: proc() ---
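A sketch of the reworked atomic intrinsics in use, assuming the usual `core:intrinsics` import path:

	package main

	import "core:fmt"
	import "core:intrinsics"

	counter: int

	main :: proc() {
		intrinsics.atomic_store(&counter, 0)

		// The `_explicit` variants take an Atomic_Memory_Order; here a relaxed increment.
		intrinsics.atomic_add_explicit(&counter, 1, .Relaxed)

		// Strong compare-exchange: returns the old value and whether the swap happened.
		old, swapped := intrinsics.atomic_compare_exchange_strong(&counter, 1, 2)
		fmt.println(old, swapped, intrinsics.atomic_load(&counter))
	}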

Some files were not shown because too many files changed in this diff