Browse Source

Merge pull request #4191 from laytan/improve-package-doc-comments

core: improve package doc comments for the documentation generator
gingerBill 11 months ago
parent
commit
a4fd0c133e
66 changed files with 965 additions and 949 deletions
  1. 90 0
      core/compress/gzip/doc.odin
  2. 0 89
      core/compress/gzip/example.odin
  3. 1 2
      core/compress/shoco/model.odin
  4. 2 2
      core/compress/shoco/shoco.odin
  5. 50 0
      core/compress/zlib/doc.odin
  6. 0 47
      core/compress/zlib/example.odin
  7. 4 4
      core/container/bit_array/doc.odin
  8. 9 6
      core/container/intrusive/list/doc.odin
  9. 43 44
      core/crypto/aead/doc.odin
  10. 3 3
      core/crypto/aes/aes.odin
  11. 2 2
      core/crypto/blake2b/blake2b.odin
  12. 2 2
      core/crypto/blake2s/blake2s.odin
  13. 2 2
      core/crypto/chacha20/chacha20.odin
  14. 2 2
      core/crypto/chacha20poly1305/chacha20poly1305.odin
  15. 3 3
      core/crypto/ed25519/ed25519.odin
  16. 28 30
      core/crypto/hash/doc.odin
  17. 1 1
      core/crypto/hkdf/hkdf.odin
  18. 1 1
      core/crypto/hmac/hmac.odin
  19. 1 1
      core/crypto/kmac/kmac.odin
  20. 2 2
      core/crypto/legacy/md5/md5.odin
  21. 3 3
      core/crypto/legacy/sha1/sha1.odin
  22. 1 1
      core/crypto/pbkdf2/pbkdf2.odin
  23. 1 1
      core/crypto/poly1305/poly1305.odin
  24. 1 1
      core/crypto/ristretto255/ristretto255.odin
  25. 2 2
      core/crypto/sha2/sha2.odin
  26. 1 1
      core/crypto/sha3/sha3.odin
  27. 2 2
      core/crypto/shake/shake.odin
  28. 9 4
      core/crypto/siphash/siphash.odin
  29. 1 1
      core/crypto/sm3/sm3.odin
  30. 1 1
      core/crypto/tuplehash/tuplehash.odin
  31. 1 1
      core/crypto/x25519/x25519.odin
  32. 1 2
      core/dynlib/doc.odin
  33. 3 3
      core/encoding/ansi/doc.odin
  34. 96 0
      core/encoding/csv/doc.odin
  35. 0 90
      core/encoding/csv/example.odin
  36. 2 2
      core/encoding/csv/reader.odin
  37. 13 12
      core/encoding/endian/doc.odin
  38. 14 12
      core/encoding/entity/entity.odin
  39. 89 83
      core/encoding/hxa/doc.odin
  40. 5 4
      core/encoding/uuid/doc.odin
  41. 6 7
      core/encoding/varint/doc.odin
  42. 1 3
      core/encoding/varint/leb128.odin
  43. 23 0
      core/encoding/xml/doc.odin
  44. 5 23
      core/encoding/xml/xml_reader.odin
  45. 15 25
      core/flags/doc.odin
  46. 2 1
      core/hash/xxhash/common.odin
  47. 2 1
      core/hash/xxhash/streaming.odin
  48. 1 0
      core/hash/xxhash/xxhash_3.odin
  49. 1 0
      core/hash/xxhash/xxhash_32.odin
  50. 1 0
      core/hash/xxhash/xxhash_64.odin
  51. 348 0
      core/image/png/doc.odin
  52. 0 351
      core/image/png/example.odin
  53. 1 0
      core/image/png/helpers.odin
  54. 1 4
      core/image/png/png.odin
  55. 1 1
      core/image/qoi/qoi.odin
  56. 3 3
      core/math/noise/opensimplex2.odin
  57. 30 32
      core/net/doc.odin
  58. 3 0
      core/prof/spall/doc.odin
  59. 3 3
      core/sync/doc.odin
  60. 8 1
      core/sys/info/doc.odin
  61. 1 0
      core/sys/llvm/bit_manipulation.odin
  62. 1 0
      core/sys/llvm/code_generator.odin
  63. 1 0
      core/sys/llvm/standard_c_library.odin
  64. 1 1
      core/sys/posix/unistd.odin
  65. 4 5
      core/text/edit/text_edit.odin
  66. 10 19
      core/text/table/doc.odin

+ 90 - 0
core/compress/gzip/doc.odin

@@ -0,0 +1,90 @@
+/*
+	Copyright 2021 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+		Ginger Bill:     Cosmetic changes.
+
+	A small GZIP implementation as an example.
+*/
+
+/*
+Example:
+	import "core:bytes"
+	import "core:os"
+	import "core:compress"
+	import "core:fmt"
+
+	// Small GZIP file with fextra, fname and fcomment present.
+	@private
+	TEST: []u8 = {
+		0x1f, 0x8b, 0x08, 0x1c, 0xcb, 0x3b, 0x3a, 0x5a,
+		0x02, 0x03, 0x07, 0x00, 0x61, 0x62, 0x03, 0x00,
+		0x63, 0x64, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x6e,
+		0x61, 0x6d, 0x65, 0x00, 0x54, 0x68, 0x69, 0x73,
+		0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f,
+		0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x2b, 0x48,
+		0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x01, 0x00, 0x15,
+		0x6a, 0x2c, 0x42, 0x07, 0x00, 0x00, 0x00,
+	}
+
+	main :: proc() {
+		// Set up output buffer.
+		buf := bytes.Buffer{}
+
+		stdout :: proc(s: string) {
+			os.write_string(os.stdout, s)
+		}
+		stderr :: proc(s: string) {
+			os.write_string(os.stderr, s)
+		}
+
+		args := os.args
+
+		if len(args) < 2 {
+			stderr("No input file specified.\n")
+			err := load(data=TEST, buf=&buf, known_gzip_size=len(TEST))
+			if err == nil {
+				stdout("Displaying test vector: ")
+				stdout(bytes.buffer_to_string(&buf))
+				stdout("\n")
+			} else {
+				fmt.printf("gzip.load returned %v\n", err)
+			}
+			bytes.buffer_destroy(&buf)
+			os.exit(0)
+		}
+
+		// The rest are all files.
+		args = args[1:]
+		err: Error
+
+		for file in args {
+			if file == "-" {
+				// Read from stdin
+				s := os.stream_from_handle(os.stdin)
+				ctx := &compress.Context_Stream_Input{
+					input = s,
+				}
+				err = load(ctx, &buf)
+			} else {
+				err = load(file, &buf)
+			}
+			if err != nil {
+				if err != E_General.File_Not_Found {
+					stderr("File not found: ")
+					stderr(file)
+					stderr("\n")
+					os.exit(1)
+				}
+				stderr("GZIP returned an error.\n")
+					bytes.buffer_destroy(&buf)
+				os.exit(2)
+			}
+			stdout(bytes.buffer_to_string(&buf))
+		}
+		bytes.buffer_destroy(&buf)
+	}
+*/
+package compress_gzip

+ 0 - 89
core/compress/gzip/example.odin

@@ -1,89 +0,0 @@
-//+build ignore
-package compress_gzip
-
-/*
-	Copyright 2021 Jeroen van Rijn <[email protected]>.
-	Made available under Odin's BSD-3 license.
-
-	List of contributors:
-		Jeroen van Rijn: Initial implementation.
-		Ginger Bill:     Cosmetic changes.
-
-	A small GZIP implementation as an example.
-*/
-
-import "core:bytes"
-import "core:os"
-import "core:compress"
-import "core:fmt"
-
-// Small GZIP file with fextra, fname and fcomment present.
-@private
-TEST: []u8 = {
-	0x1f, 0x8b, 0x08, 0x1c, 0xcb, 0x3b, 0x3a, 0x5a,
-	0x02, 0x03, 0x07, 0x00, 0x61, 0x62, 0x03, 0x00,
-	0x63, 0x64, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x6e,
-	0x61, 0x6d, 0x65, 0x00, 0x54, 0x68, 0x69, 0x73,
-	0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f,
-	0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x2b, 0x48,
-	0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x01, 0x00, 0x15,
-	0x6a, 0x2c, 0x42, 0x07, 0x00, 0x00, 0x00,
-}
-
-main :: proc() {
-	// Set up output buffer.
-	buf := bytes.Buffer{}
-
-	stdout :: proc(s: string) {
-		os.write_string(os.stdout, s)
-	}
-	stderr :: proc(s: string) {
-		os.write_string(os.stderr, s)
-	}
-
-	args := os.args
-
-	if len(args) < 2 {
-		stderr("No input file specified.\n")
-		err := load(data=TEST, buf=&buf, known_gzip_size=len(TEST))
-		if err == nil {
-			stdout("Displaying test vector: ")
-			stdout(bytes.buffer_to_string(&buf))
-			stdout("\n")
-		} else {
-			fmt.printf("gzip.load returned %v\n", err)
-		}
-		bytes.buffer_destroy(&buf)
-		os.exit(0)
-	}
-
-	// The rest are all files.
-	args = args[1:]
-	err: Error
-
-	for file in args {
-		if file == "-" {
-			// Read from stdin
-			s := os.stream_from_handle(os.stdin)
-			ctx := &compress.Context_Stream_Input{
-				input = s,
-			}
-			err = load(ctx, &buf)
-		} else {
-			err = load(file, &buf)
-		}
-		if err != nil {
-			if err != E_General.File_Not_Found {
-				stderr("File not found: ")
-				stderr(file)
-				stderr("\n")
-				os.exit(1)
-			}
-			stderr("GZIP returned an error.\n")
-				bytes.buffer_destroy(&buf)
-			os.exit(2)
-		}
-		stdout(bytes.buffer_to_string(&buf))
-	}
-	bytes.buffer_destroy(&buf)
-}

+ 1 - 2
core/compress/shoco/model.odin

@@ -4,7 +4,6 @@
 	which is an English word model.
 */
 
-// package shoco is an implementation of the shoco short string compressor
 package compress_shoco
 
 DEFAULT_MODEL :: Shoco_Model {
@@ -145,4 +144,4 @@ DEFAULT_MODEL :: Shoco_Model {
 		{ 0xc0000000, 2, 4, { 25, 22, 19, 16, 16, 16, 16, 16 }, { 15,  7,  7,  7, 0, 0, 0, 0 }, 0xe0, 0xc0 },
 		{ 0xe0000000, 4, 8, { 23, 19, 15, 11,  8,  5,  2,  0 }, { 31, 15, 15, 15, 7, 7, 7, 3 }, 0xf0, 0xe0 },
 	},
-}
+}

+ 2 - 2
core/compress/shoco/shoco.odin

@@ -8,7 +8,7 @@
 	An implementation of [shoco](https://github.com/Ed-von-Schleck/shoco) by Christian Schramm.
 */
 
-// package shoco is an implementation of the shoco short string compressor
+// package shoco is an implementation of the shoco short string compressor.
 package compress_shoco
 
 import "base:intrinsics"
@@ -308,4 +308,4 @@ compress_string :: proc(input: string, model := DEFAULT_MODEL, allocator := cont
 	resize(&buf, length) or_return
 	return buf[:length], result
 }
-compress :: proc{compress_string_to_buffer, compress_string}
+compress :: proc{compress_string_to_buffer, compress_string}

+ 50 - 0
core/compress/zlib/doc.odin

@@ -0,0 +1,50 @@
+/*
+	Copyright 2021 Jeroen van Rijn <[email protected]>.
+	Made available under Odin's BSD-3 license.
+
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+
+	An example of how to use `zlib.inflate`.
+*/
+
+/*
+Example:
+	package main
+
+	import "core:bytes"
+	import "core:fmt"
+
+	main :: proc() {
+		ODIN_DEMO := []u8{
+			120, 218, 101, 144,  65, 110, 131,  48,  16,  69, 215, 246,  41, 190,  44,  69,  73,  32, 148, 182,
+			 75,  75,  28,  32, 251,  46, 217,  88, 238,   0,  86, 192,  32, 219,  36, 170, 170, 172, 122, 137,
+			238, 122, 197,  30, 161,  70, 162,  20,  81, 203, 139,  25, 191, 255, 191,  60,  51,  40, 125,  81,
+			 53,  33, 144,  15, 156, 155, 110, 232,  93, 128, 208, 189,  35,  89, 117,  65, 112, 222,  41,  99,
+			 33,  37,   6, 215, 235, 195,  17, 239, 156, 197, 170, 118, 170, 131,  44,  32,  82, 164,  72, 240,
+			253, 245, 249, 129,  12, 185, 224,  76, 105,  61, 118,  99, 171,  66, 239,  38, 193,  35, 103,  85,
+			172,  66, 127,  33, 139,  24, 244, 235, 141,  49, 204, 223,  76, 208, 205, 204, 166,   7, 173,  60,
+			 97, 159, 238,  37, 214,  41, 105, 129, 167,   5, 102,  27, 152, 173,  97, 178, 129,  73, 129, 231,
+			  5, 230,  27, 152, 175, 225,  52, 192, 127, 243, 170, 157, 149,  18, 121, 142, 115, 109, 227, 122,
+			 64,  87, 114, 111, 161,  49, 182,   6, 181, 158, 162, 226, 206, 167,  27, 215, 246,  48,  56,  99,
+			 67, 117,  16,  47,  13,  45,  35, 151,  98, 231,  75,   1, 173,  90,  61, 101, 146,  71, 136, 244,
+			170, 218, 145, 176, 123,  45, 173,  56, 113, 134, 191,  51, 219,  78, 235,  95,  28, 249, 253,   7,
+			159, 150, 133, 125,
+		}
+		OUTPUT_SIZE :: 432
+
+		buf: bytes.Buffer
+
+		// We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
+		err := inflate(input=ODIN_DEMO, buf=&buf, expected_output_size=OUTPUT_SIZE)
+		defer bytes.buffer_destroy(&buf)
+
+		if err != nil {
+			fmt.printf("\nError: %v\n", err)
+		}
+		s := bytes.buffer_to_string(&buf)
+		fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s)
+		assert(len(s) == OUTPUT_SIZE)
+	}
+*/
+package compress_zlib

+ 0 - 47
core/compress/zlib/example.odin

@@ -1,47 +0,0 @@
-//+build ignore
-package compress_zlib
-
-/*
-	Copyright 2021 Jeroen van Rijn <[email protected]>.
-	Made available under Odin's BSD-3 license.
-
-	List of contributors:
-		Jeroen van Rijn: Initial implementation.
-
-	An example of how to use `zlib.inflate`.
-*/
-
-import "core:bytes"
-import "core:fmt"
-
-main :: proc() {
-	ODIN_DEMO := []u8{
-		120, 218, 101, 144,  65, 110, 131,  48,  16,  69, 215, 246,  41, 190,  44,  69,  73,  32, 148, 182,
-		 75,  75,  28,  32, 251,  46, 217,  88, 238,   0,  86, 192,  32, 219,  36, 170, 170, 172, 122, 137,
-		238, 122, 197,  30, 161,  70, 162,  20,  81, 203, 139,  25, 191, 255, 191,  60,  51,  40, 125,  81,
-		 53,  33, 144,  15, 156, 155, 110, 232,  93, 128, 208, 189,  35,  89, 117,  65, 112, 222,  41,  99,
-		 33,  37,   6, 215, 235, 195,  17, 239, 156, 197, 170, 118, 170, 131,  44,  32,  82, 164,  72, 240,
-		253, 245, 249, 129,  12, 185, 224,  76, 105,  61, 118,  99, 171,  66, 239,  38, 193,  35, 103,  85,
-		172,  66, 127,  33, 139,  24, 244, 235, 141,  49, 204, 223,  76, 208, 205, 204, 166,   7, 173,  60,
-		 97, 159, 238,  37, 214,  41, 105, 129, 167,   5, 102,  27, 152, 173,  97, 178, 129,  73, 129, 231,
-		  5, 230,  27, 152, 175, 225,  52, 192, 127, 243, 170, 157, 149,  18, 121, 142, 115, 109, 227, 122,
-		 64,  87, 114, 111, 161,  49, 182,   6, 181, 158, 162, 226, 206, 167,  27, 215, 246,  48,  56,  99,
-		 67, 117,  16,  47,  13,  45,  35, 151,  98, 231,  75,   1, 173,  90,  61, 101, 146,  71, 136, 244,
-		170, 218, 145, 176, 123,  45, 173,  56, 113, 134, 191,  51, 219,  78, 235,  95,  28, 249, 253,   7,
-		159, 150, 133, 125,
-	}
-	OUTPUT_SIZE :: 432
-
-	buf: bytes.Buffer
-
-	// We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
-	err := inflate(input=ODIN_DEMO, buf=&buf, expected_output_size=OUTPUT_SIZE)
-	defer bytes.buffer_destroy(&buf)
-
-	if err != nil {
-		fmt.printf("\nError: %v\n", err)
-	}
-	s := bytes.buffer_to_string(&buf)
-	fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s)
-	assert(len(s) == OUTPUT_SIZE)
-}

+ 4 - 4
core/container/bit_array/doc.odin

@@ -1,8 +1,8 @@
 /*
 The Bit Array can be used in several ways:
 
-- By default you don't need to instantiate a Bit Array:
-
+By default you don't need to instantiate a Bit Array.
+Example:
 	package test
 
 	import "core:fmt"
@@ -22,8 +22,8 @@ The Bit Array can be used in several ways:
 		destroy(&bits)
 	}
 
-- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
-
+A Bit Array can optionally allow for negative indices, if the minimum value was given during creation.
+Example:
 	package test
 
 	import "core:fmt"

+ 9 - 6
core/container/intrusive/list/doc.odin

@@ -1,22 +1,22 @@
 /*
 Package list implements an intrusive doubly-linked list.
 
-An intrusive container requires a `Node` to be embedded in your own structure, like this:
-
+An intrusive container requires a `Node` to be embedded in your own structure, like this.
+Example:
 	My_String :: struct {
 		node:  list.Node,
 		value: string,
 	}
 
-Embedding the members of a `list.Node` in your structure with the `using` keyword is also allowed:
-
+Embedding the members of a `list.Node` in your structure with the `using` keyword is also allowed.
+Example:
 	My_String :: struct {
 		using node: list.Node,
 		value: string,
 	}
 
-Here is a full example:
-
+Here is a full example.
+Example:
 	package test
 	
 	import "core:fmt"
@@ -42,5 +42,8 @@ Here is a full example:
 	    value: string,
 	}
 
+Output:
+	Hello
+	World
 */
 package container_intrusive_list

+ 43 - 44
core/crypto/aead/doc.odin

@@ -10,49 +10,48 @@ algorithm.
 WARNING: Reusing the same key + iv to seal (encrypt) multiple messages
 results in catastrophic loss of security for most algorithms.
 
-```odin
-package aead_example
-
-import "core:bytes"
-import "core:crypto"
-import "core:crypto/aead"
-
-main :: proc() {
-	algo := aead.Algorithm.XCHACHA20POLY1305
-
-	// The example added associated data, and plaintext.
-	aad_str := "Get your ass in gear boys."
-	pt_str := "They're immanetizing the Eschaton."
-
-	aad := transmute([]byte)aad_str
-	plaintext := transmute([]byte)pt_str
-	pt_len := len(plaintext)
-
-	// Generate a random key for the purposes of illustration.
-	key := make([]byte, aead.KEY_SIZES[algo])
-	defer delete(key)
-	crypto.rand_bytes(key)
-
-	// `ciphertext || tag`, is a common way data is transmitted, so
-	// demonstrate that.
-	buf := make([]byte, pt_len + aead.TAG_SIZES[algo])
-	defer delete(buf)
-	ciphertext, tag := buf[:pt_len], buf[pt_len:]
-
-	// Seal the AAD + Plaintext.
-	iv := make([]byte, aead.IV_SIZES[algo])
-	defer delete(iv)
-	crypto.rand_bytes(iv) // Random IVs are safe with XChaCha20-Poly1305.
-	aead.seal(algo, ciphertext, tag, key, iv, aad, plaintext)
-
-	// Open the AAD + Ciphertext.
-	opened_pt := buf[:pt_len]
-	if ok := aead.open(algo, opened_pt, key, iv, aad, ciphertext, tag); !ok {
-		panic("aead example: failed to open")
+Example:
+	package aead_example
+
+	import "core:bytes"
+	import "core:crypto"
+	import "core:crypto/aead"
+
+	main :: proc() {
+		algo := aead.Algorithm.XCHACHA20POLY1305
+
+		// The example added associated data, and plaintext.
+		aad_str := "Get your ass in gear boys."
+		pt_str := "They're immanetizing the Eschaton."
+
+		aad := transmute([]byte)aad_str
+		plaintext := transmute([]byte)pt_str
+		pt_len := len(plaintext)
+
+		// Generate a random key for the purposes of illustration.
+		key := make([]byte, aead.KEY_SIZES[algo])
+		defer delete(key)
+		crypto.rand_bytes(key)
+
+		// `ciphertext || tag`, is a common way data is transmitted, so
+		// demonstrate that.
+		buf := make([]byte, pt_len + aead.TAG_SIZES[algo])
+		defer delete(buf)
+		ciphertext, tag := buf[:pt_len], buf[pt_len:]
+
+		// Seal the AAD + Plaintext.
+		iv := make([]byte, aead.IV_SIZES[algo])
+		defer delete(iv)
+		crypto.rand_bytes(iv) // Random IVs are safe with XChaCha20-Poly1305.
+		aead.seal(algo, ciphertext, tag, key, iv, aad, plaintext)
+
+		// Open the AAD + Ciphertext.
+		opened_pt := buf[:pt_len]
+		if ok := aead.open(algo, opened_pt, key, iv, aad, ciphertext, tag); !ok {
+			panic("aead example: failed to open")
+		}
+
+		assert(bytes.equal(opened_pt, plaintext))
 	}
-
-	assert(bytes.equal(opened_pt, plaintext))
-}
-```
 */
-package aead
+package aead

+ 3 - 3
core/crypto/aes/aes.odin

@@ -2,9 +2,9 @@
 package aes implements the AES block cipher and some common modes.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197-upd1.pdf
-- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
-- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197-upd1.pdf ]]
+- [[ https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf ]]
+- [[ https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf ]]
 */
 package aes
 

+ 2 - 2
core/crypto/blake2b/blake2b.odin

@@ -2,8 +2,8 @@
 package blake2b implements the BLAKE2b hash algorithm.
 
 See:
-- https://datatracker.ietf.org/doc/html/rfc7693
-- https://www.blake2.net
+- [[ https://datatracker.ietf.org/doc/html/rfc7693 ]]
+- [[ https://www.blake2.net ]]
 */
 package blake2b
 

+ 2 - 2
core/crypto/blake2s/blake2s.odin

@@ -2,8 +2,8 @@
 package blake2s implements the BLAKE2s hash algorithm.
 
 See:
-- https://datatracker.ietf.org/doc/html/rfc7693
-- https://www.blake2.net/
+- [[ https://datatracker.ietf.org/doc/html/rfc7693 ]]
+- [[ https://www.blake2.net/ ]]
 */
 package blake2s
 

+ 2 - 2
core/crypto/chacha20/chacha20.odin

@@ -2,8 +2,8 @@
 package chacha20 implements the ChaCha20 and XChaCha20 stream ciphers.
 
 See:
-- https://datatracker.ietf.org/doc/html/rfc8439
-- https://datatracker.ietf.org/doc/draft-irtf-cfrg-xchacha/03/
+- [[ https://datatracker.ietf.org/doc/html/rfc8439 ]]
+- [[ https://datatracker.ietf.org/doc/draft-irtf-cfrg-xchacha/03/ ]]
 */
 package chacha20
 

+ 2 - 2
core/crypto/chacha20poly1305/chacha20poly1305.odin

@@ -4,8 +4,8 @@ AEAD_XChaCha20_Poly1305 Authenticated Encryption with Additional Data
 algorithms.
 
 See:
-- https://www.rfc-editor.org/rfc/rfc8439
-- https://datatracker.ietf.org/doc/html/draft-arciszewski-xchacha-03
+- [[ https://www.rfc-editor.org/rfc/rfc8439 ]]
+- [[ https://datatracker.ietf.org/doc/html/draft-arciszewski-xchacha-03 ]]
 */
 package chacha20poly1305
 

+ 3 - 3
core/crypto/ed25519/ed25519.odin

@@ -2,9 +2,9 @@
 package ed25519 implements the Ed25519 EdDSA signature algorithm.
 
 See:
-- https://datatracker.ietf.org/doc/html/rfc8032
-- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
-- https://eprint.iacr.org/2020/1244.pdf
+- [[ https://datatracker.ietf.org/doc/html/rfc8032 ]]
+- [[ https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf ]]
+- [[ https://eprint.iacr.org/2020/1244.pdf ]]
 */
 package ed25519
 

+ 28 - 30
core/crypto/hash/doc.odin

@@ -17,46 +17,44 @@ accomplish common tasks.
   A third optional boolean parameter controls if the file is streamed
  (default), or read at once.
 
-```odin
-package hash_example
+Example:
+	package hash_example
 
-import "core:crypto/hash"
+	import "core:crypto/hash"
 
-main :: proc() {
-	input := "Feed the fire."
+	main :: proc() {
+		input := "Feed the fire."
 
-	// Compute the digest, using the high level API.
-	returned_digest := hash.hash(hash.Algorithm.SHA512_256, input)
-	defer delete(returned_digest)
+		// Compute the digest, using the high level API.
+		returned_digest := hash.hash(hash.Algorithm.SHA512_256, input)
+		defer delete(returned_digest)
 
-	// Variant that takes a destination buffer, instead of returning
-	// the digest.
-	digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.BLAKE2B]) // @note: Destination buffer has to be at least as big as the digest size of the hash.
-	defer delete(digest)
-	hash.hash(hash.Algorithm.BLAKE2B, input, digest)
-}
-```
+		// Variant that takes a destination buffer, instead of returning
+		// the digest.
+		digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.BLAKE2B]) // @note: Destination buffer has to be at least as big as the digest size of the hash.
+		defer delete(digest)
+		hash.hash(hash.Algorithm.BLAKE2B, input, digest)
+	}
 
 A generic low level API is provided supporting the init/update/final interface
 that is typical with cryptographic hash function implementations.
 
-```odin
-package hash_example
+Example:
+	package hash_example
 
-import "core:crypto/hash"
+	import "core:crypto/hash"
 
-main :: proc() {
-    input := "Let the cinders burn."
+	main :: proc() {
+		input := "Let the cinders burn."
 
-    // Compute the digest, using the low level API.
-    ctx: hash.Context
-    digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.SHA3_512])
-    defer delete(digest)
+		// Compute the digest, using the low level API.
+		ctx: hash.Context
+		digest := make([]byte, hash.DIGEST_SIZES[hash.Algorithm.SHA3_512])
+		defer delete(digest)
 
-    hash.init(&ctx, hash.Algorithm.SHA3_512)
-    hash.update(&ctx, transmute([]byte)input)
-    hash.final(&ctx, digest)
-}
-```
+		hash.init(&ctx, hash.Algorithm.SHA3_512)
+		hash.update(&ctx, transmute([]byte)input)
+		hash.final(&ctx, digest)
+	}
 */
-package crypto_hash
+package crypto_hash

+ 1 - 1
core/crypto/hkdf/hkdf.odin

@@ -2,7 +2,7 @@
 package hkdf implements the HKDF HMAC-based Extract-and-Expand Key
 Derivation Function.
 
-See: https://www.rfc-editor.org/rfc/rfc5869
+See: [[ https://www.rfc-editor.org/rfc/rfc5869 ]]
 */
 package hkdf
 

+ 1 - 1
core/crypto/hmac/hmac.odin

@@ -2,7 +2,7 @@
 package hmac implements the HMAC MAC algorithm.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.198-1.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.198-1.pdf ]]
 */
 package hmac
 

+ 1 - 1
core/crypto/kmac/kmac.odin

@@ -2,7 +2,7 @@
 package kmac implements the KMAC MAC algorithm.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf ]]
 */
 package kmac
 

+ 2 - 2
core/crypto/legacy/md5/md5.odin

@@ -5,8 +5,8 @@ WARNING: The MD5 algorithm is known to be insecure and should only be
 used for interoperating with legacy applications.
 
 See:
-- https://eprint.iacr.org/2005/075
-- https://datatracker.ietf.org/doc/html/rfc1321
+- [[ https://eprint.iacr.org/2005/075 ]]
+- [[ https://datatracker.ietf.org/doc/html/rfc1321 ]]
 */
 package md5
 

+ 3 - 3
core/crypto/legacy/sha1/sha1.odin

@@ -5,9 +5,9 @@ WARNING: The SHA1 algorithm is known to be insecure and should only be
 used for interoperating with legacy applications.
 
 See:
-- https://eprint.iacr.org/2017/190
-- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
-- https://datatracker.ietf.org/doc/html/rfc3174
+- [[ https://eprint.iacr.org/2017/190 ]]
+- [[ https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf ]]
+- [[ https://datatracker.ietf.org/doc/html/rfc3174 ]]
 */
 package sha1
 

+ 1 - 1
core/crypto/pbkdf2/pbkdf2.odin

@@ -1,7 +1,7 @@
 /*
 package pbkdf2 implements the PBKDF2 password-based key derivation function.
 
-See: https://www.rfc-editor.org/rfc/rfc2898
+See: [[ https://www.rfc-editor.org/rfc/rfc2898 ]]
 */
 package pbkdf2
 

+ 1 - 1
core/crypto/poly1305/poly1305.odin

@@ -2,7 +2,7 @@
 package poly1305 implements the Poly1305 one-time MAC algorithm.
 
 See:
-- https://datatracker.ietf.org/doc/html/rfc8439
+- [[ https://datatracker.ietf.org/doc/html/rfc8439 ]]
 */
 package poly1305
 

+ 1 - 1
core/crypto/ristretto255/ristretto255.odin

@@ -2,7 +2,7 @@
 package ristretto255 implements the ristretto255 prime-order group.
 
 See:
-- https://www.rfc-editor.org/rfc/rfc9496
+- [[ https://www.rfc-editor.org/rfc/rfc9496 ]]
 */
 package ristretto255
 

+ 2 - 2
core/crypto/sha2/sha2.odin

@@ -2,8 +2,8 @@
 package sha2 implements the SHA2 hash algorithm family.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
-- https://datatracker.ietf.org/doc/html/rfc3874
+- [[ https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf ]]
+- [[ https://datatracker.ietf.org/doc/html/rfc3874 ]]
 */
 package sha2
 

+ 1 - 1
core/crypto/sha3/sha3.odin

@@ -6,7 +6,7 @@ pre-standardization Keccak algorithm is required, it can be found in
 crypto/legacy/keccak.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf ]]
 */
 package sha3
 

+ 2 - 2
core/crypto/shake/shake.odin

@@ -4,8 +4,8 @@ package shake implements the SHAKE and cSHAKE XOF algorithm families.
 The SHA3 hash algorithm can be found in the crypto/sha3.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf
-- https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/fips/nist.fips.202.pdf ]]
+- [[ https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf ]]
 */
 package shake
 

+ 9 - 4
core/crypto/siphash/siphash.odin

@@ -1,3 +1,12 @@
+/*
+package siphash implements the SipHash hashing algorithm.
+
+Use the specific procedures for a certain setup. The generic procedures will default to SipHash 2-4.
+
+See:
+- [[ https://github.com/veorq/SipHash ]]
+- [[ https://www.aumasson.jp/siphash/siphash.pdf ]]
+*/
 package siphash
 
 /*
@@ -6,10 +15,6 @@ package siphash
 
     List of contributors:
         zhibog:  Initial implementation.
-
-    Implementation of the SipHash hashing algorithm, as defined at <https://github.com/veorq/SipHash> and <https://www.aumasson.jp/siphash/siphash.pdf>
-
-    Use the specific procedures for a certain setup. The generic procdedures will default to Siphash 2-4
 */
 
 import "core:crypto"

+ 1 - 1
core/crypto/sm3/sm3.odin

@@ -2,7 +2,7 @@
 package sm3 implements the SM3 hash algorithm.
 
 See:
-- https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
+- [[ https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 ]]
 */
 package sm3
 

+ 1 - 1
core/crypto/tuplehash/tuplehash.odin

@@ -2,7 +2,7 @@
 package tuplehash implements the TupleHash and TupleHashXOF algorithms.
 
 See:
-- https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf
+- [[ https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf ]]
 */
 package tuplehash
 

+ 1 - 1
core/crypto/x25519/x25519.odin

@@ -3,7 +3,7 @@ package x25519 implements the X25519 (aka curve25519) Elliptic-Curve
 Diffie-Hellman key exchange protocol.
 
 See:
-- https://www.rfc-editor.org/rfc/rfc7748
+- [[ https://www.rfc-editor.org/rfc/rfc7748 ]]
 */
 package x25519
 

+ 1 - 2
core/dynlib/doc.odin

@@ -4,7 +4,6 @@ Package `core:dynlib` implements loading of shared libraries/DLLs and their symb
 The behaviour of dynamically loaded libraries is specific to the target platform of the program.
 For in depth detail on the underlying behaviour please refer to your target platform's documentation.
 
-See `example` directory for an example library exporting 3 symbols and a host program loading them automatically
-by defining a symbol table struct.
+For a full example, see: [[ core/dynlib/example; https://github.com/odin-lang/Odin/tree/master/core/dynlib/example ]]
 */
 package dynlib

+ 3 - 3
core/encoding/ansi/doc.odin

@@ -13,8 +13,8 @@ If your terminal supports 24-bit true color mode, you can also do this:
 	fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
 
 For more information, see:
-	1. https://en.wikipedia.org/wiki/ANSI_escape_code
-	2. https://www.vt100.net/docs/vt102-ug/chapter5.html
-	3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+- [[ https://en.wikipedia.org/wiki/ANSI_escape_code ]]
+- [[ https://www.vt100.net/docs/vt102-ug/chapter5.html ]]
+- [[ https://invisible-island.net/xterm/ctlseqs/ctlseqs.html ]]
 */
 package ansi

+ 96 - 0
core/encoding/csv/doc.odin

@@ -0,0 +1,96 @@
+/*
+package csv reads and writes comma-separated values (CSV) files.
+This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
+
+Example:
+	package main
+
+	import "core:fmt"
+	import "core:encoding/csv"
+	import "core:os"
+
+	// Requires keeping the entire CSV file in memory at once
+	iterate_csv_from_string :: proc(filename: string) {
+		r: csv.Reader
+		r.trim_leading_space  = true
+		r.reuse_record        = true // Without it you have to delete(record)
+		r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+		defer csv.reader_destroy(&r)
+
+		csv_data, ok := os.read_entire_file(filename)
+		if ok {
+			csv.reader_init_with_string(&r, string(csv_data))
+		} else {
+			fmt.printfln("Unable to open file: %v", filename)
+			return
+		}
+		defer delete(csv_data)
+
+		for r, i, err in csv.iterator_next(&r) {
+			if err != nil { /* Do something with error */ }
+			for f, j in r {
+				fmt.printfln("Record %v, field %v: %q", i, j, f)
+			}
+		}
+	}
+
+	// Reads the CSV as it's processed (with a small buffer)
+	iterate_csv_from_stream :: proc(filename: string) {
+		fmt.printfln("Hellope from %v", filename)
+		r: csv.Reader
+		r.trim_leading_space  = true
+		r.reuse_record        = true // Without it you have to delete(record)
+		r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+		defer csv.reader_destroy(&r)
+
+		handle, err := os.open(filename)
+		if err != nil {
+			fmt.eprintfln("Error opening file: %v", filename)
+			return
+		}
+		defer os.close(handle)
+		csv.reader_init(&r, os.stream_from_handle(handle))
+
+		for r, i in csv.iterator_next(&r) {
+			for f, j in r {
+				fmt.printfln("Record %v, field %v: %q", i, j, f)
+			}
+		}
+		fmt.printfln("Error: %v", csv.iterator_last_error(r))
+	}
+
+	// Read all records at once
+	read_csv_from_string :: proc(filename: string) {
+		r: csv.Reader
+		r.trim_leading_space  = true
+		r.reuse_record        = true // Without it you have to delete(record)
+		r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
+		defer csv.reader_destroy(&r)
+
+		csv_data, ok := os.read_entire_file(filename)
+		if ok {
+			csv.reader_init_with_string(&r, string(csv_data))
+		} else {
+			fmt.printfln("Unable to open file: %v", filename)
+			return
+		}
+		defer delete(csv_data)
+
+		records, err := csv.read_all(&r)
+		if err != nil { /* Do something with CSV parse error */ }
+
+		defer {
+			for rec in records {
+				delete(rec)
+			}
+			delete(records)
+		}
+
+		for r, i in records {
+			for f, j in r {
+				fmt.printfln("Record %v, field %v: %q", i, j, f)
+			}
+		}
+	}
+*/
+package encoding_csv

+ 0 - 90
core/encoding/csv/example.odin

@@ -1,90 +0,0 @@
-//+build ignore
-package encoding_csv
-
-import "core:fmt"
-import "core:encoding/csv"
-import "core:os"
-
-// Requires keeping the entire CSV file in memory at once
-iterate_csv_from_string :: proc(filename: string) {
-	r: csv.Reader
-	r.trim_leading_space  = true
-	r.reuse_record        = true // Without it you have to delete(record)
-	r.reuse_record_buffer = true // Without it you have to each of the fields within it
-	defer csv.reader_destroy(&r)
-
-	csv_data, ok := os.read_entire_file(filename)
-	if ok {
-		csv.reader_init_with_string(&r, string(csv_data))
-	} else {
-		fmt.printfln("Unable to open file: %v", filename)
-		return
-	}
-	defer delete(csv_data)
-
-	for r, i, err in csv.iterator_next(&r) {
-		if err != nil { /* Do something with error */ }
-		for f, j in r {
-			fmt.printfln("Record %v, field %v: %q", i, j, f)
-		}
-	}
-}
-
-// Reads the CSV as it's processed (with a small buffer)
-iterate_csv_from_stream :: proc(filename: string) {
-	fmt.printfln("Hellope from %v", filename)
-	r: csv.Reader
-	r.trim_leading_space  = true
-	r.reuse_record        = true // Without it you have to delete(record)
-	r.reuse_record_buffer = true // Without it you have to each of the fields within it
-	defer csv.reader_destroy(&r)
-
-	handle, err := os.open(filename)
-	if err != nil {
-		fmt.eprintfln("Error opening file: %v", filename)
-		return
-	}
-	defer os.close(handle)
-	csv.reader_init(&r, os.stream_from_handle(handle))
-
-	for r, i in csv.iterator_next(&r) {
-		for f, j in r {
-			fmt.printfln("Record %v, field %v: %q", i, j, f)
-		}
-	}
-	fmt.printfln("Error: %v", csv.iterator_last_error(r))
-}
-
-// Read all records at once
-read_csv_from_string :: proc(filename: string) {
-	r: csv.Reader
-	r.trim_leading_space  = true
-	r.reuse_record        = true // Without it you have to delete(record)
-	r.reuse_record_buffer = true // Without it you have to each of the fields within it
-	defer csv.reader_destroy(&r)
-
-	csv_data, ok := os.read_entire_file(filename)
-	if ok {
-		csv.reader_init_with_string(&r, string(csv_data))
-	} else {
-		fmt.printfln("Unable to open file: %v", filename)
-		return
-	}
-	defer delete(csv_data)
-
-	records, err := csv.read_all(&r)
-	if err != nil { /* Do something with CSV parse error */ }
-
-	defer {
-		for rec in records {
-			delete(rec)
-		}
-		delete(records)
-	}
-
-	for r, i in records {
-		for f, j in r {
-			fmt.printfln("Record %v, field %v: %q", i, j, f)
-		}
-	}
-}

+ 2 - 2
core/encoding/csv/reader.odin

@@ -1,5 +1,5 @@
 // package csv reads and writes comma-separated values (CSV) files.
-// This package supports the format described in RFC 4180 <https://tools.ietf.org/html/rfc4180.html>
+// This package supports the format described in [[ RFC 4180; https://tools.ietf.org/html/rfc4180.html ]]
 package encoding_csv
 
 import "core:bufio"
@@ -484,4 +484,4 @@ _read_record :: proc(r: ^Reader, dst: ^[dynamic]string, allocator := context.all
 		r.fields_per_record = len(dst)
 	}
 	return dst[:], err
-}
+}

+ 13 - 12
core/encoding/endian/doc.odin

@@ -2,22 +2,23 @@
     Package endian implements a simple translation between bytes and numbers with
     specific endian encodings.
 
-    buf: [100]u8
-    put_u16(buf[:], .Little, 16) or_return
+Example:
+	buf: [100]u8
+	put_u16(buf[:], .Little, 16) or_return
 
-    You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
-    put_u16(buf[:], 16) or_return
+	// You may ask yourself, why isn't `byte_order` platform Endianness by default, so we can write:
+	put_u16(buf[:], 16) or_return
 
-    The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
-    each of their fields, or use a header field which specifies it for the entire file.
+	// The answer is that very few file formats are written in native/platform endianness. Most of them specify the endianness of
+	// each of their fields, or use a header field which specifies it for the entire file.
 
-    e.g. a file which specifies it at the top for all fields could do this:
-    file_order := .Little if buf[0] == 0 else .Big
-    field := get_u16(buf[1:], file_order) or_return
+	// e.g. a file which specifies it at the top for all fields could do this:
+	file_order := .Little if buf[0] == 0 else .Big
+	field := get_u16(buf[1:], file_order) or_return
 
-    If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
-    be that your future self or someone else.
+	// If on the other hand a field is *always* Big-Endian, you're wise to explicitly state it for the benefit of the reader,
+	// be that your future self or someone else.
 
-    field := get_u16(buf[:], .Big) or_return
+	field := get_u16(buf[:], .Big) or_return
 */
 package encoding_endian

+ 14 - 12
core/encoding/entity/entity.odin

@@ -1,23 +1,25 @@
-package encoding_unicode_entity
 /*
-	A unicode entity encoder/decoder
-
 	Copyright 2021 Jeroen van Rijn <[email protected]>.
 	Made available under Odin's BSD-3 license.
 
+	List of contributors:
+		Jeroen van Rijn: Initial implementation.
+*/
+
+/*
+	A unicode entity encoder/decoder.
+
 	This code has several procedures to map unicode runes to/from different textual encodings.
 	- SGML/XML/HTML entity
-	-- &#<decimal>;
-	-- &#x<hexadecimal>;
-	-- &<entity name>;   (If the lookup tables are compiled in).
-	Reference: https://www.w3.org/2003/entities/2007xml/unicode.xml	
+	- &#<decimal>;
+	- &#x<hexadecimal>;
+	- &<entity name>;   (If the lookup tables are compiled in).
+	Reference: [[ https://www.w3.org/2003/entities/2007xml/unicode.xml ]]
 
 	- URL encode / decode %hex entity
-	Reference: https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1
-
-	List of contributors:
-		Jeroen van Rijn: Initial implementation.
+	Reference: [[ https://datatracker.ietf.org/doc/html/rfc3986/#section-2.1 ]]
 */
+package encoding_unicode_entity
 
 import "core:unicode/utf8"
 import "core:unicode"
@@ -353,4 +355,4 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
 
 	}
 	return false, .None
-}
+}

+ 89 - 83
core/encoding/hxa/doc.odin

@@ -1,83 +1,89 @@
-// Implementation of the HxA 3D asset format
-// HxA is a interchangeable graphics asset format.
-// Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
-//
-// Author of this Odin package: Ginger Bill
-//
-// Following comment is copied from the original C-implementation
-// ---------
-// -Does the world need another Graphics file format?
-// 	Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
-// 	scratch, or don't have some basic features needed in modern computer graphics.
-// -Who is this format for?
-// 	For people who want a capable open Graphics format that can be implemented from scratch in
-// 	a few hours. It is ideal for graphics researchers, game developers or other people who
-// 	wants to build custom graphics pipelines. Given how easy it is to parse and write, it
-// 	should be easy to write utilities that process assets to preform tasks like: generating
-// 	normals, light-maps, tangent spaces, Error detection, GPU optimization, LOD generation,
-// 	and UV mapping.
-// -Why store images in the format when there are so many good image formats already?
-// 	Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering rely
-// 	on 1D, 3D, cube, multilayer, multi channel, floating point bitmap buffers. There almost no
-// 	formats for this kind of data. Also 3D files that reference separate image files rely on
-// 	file paths, and this often creates issues when the assets are moved. By including the
-// 	texture data in the files directly the assets become self contained.
-// -Why doesn't the format support <insert whatever>?
-// 	Because the entire point is to make a format that can be implemented. Features like NURBSs,
-// 	Construction history, or BSP trees would make the format too large to serve its purpose.
-// 	The facilities of the formats to store meta data should make the format flexible enough
-// 	for most uses. Adding HxA support should be something anyone can do in a days work.
-//
-// Structure:
-// ----------
-// HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
-// a few basic structures, and depending on how they are used they mean different things. This means
-// that you can implement a tool that loads the entire file, modifies the parts it cares about and
-// leaves the rest intact. It is also possible to write a tool that makes all data in the file
-// editable without the need to understand its use. It is also possible for anyone to use the format
-// to store data axillary data. Anyone who wants to store data not covered by a convention can submit
-// a convention to extend the format. There should never be a convention for storing the same data in
-// two differed ways.
-// The data is story in a number of nodes that are stored in an array. Each node stores an array of
-// meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
-// to store additional information, for things like transforms, lights, shaders and animation.
-// Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
-// of a number of named layers. All layers in the stack have the same number of elements. Each layer
-// describes one property of the primitive. Each layer can have multiple channels and each layer can
-// store data of a different type.
-//
-// HaX stores 3 kinds of nodes
-// 	- Pixel data.
-// 	- Polygon geometry data.
-// 	- Meta data only.
-//
-// Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
-// Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
-// layers to store things like color. The length of the layer stack is determined by the type and
-// dimensions stored in the
-//
-// Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
-// vertex data stores things like verities, blend shapes, weight maps, and vertex colors. The first
-// layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
-// of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
-// for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
-// integer layer named "index" describing the vertices used to form polygons. The last value in each
-// polygon has a negative - 1 index to indicate the end of the polygon.
-//
-// Example:
-// 	A quad and a tri with the vertex index:
-// 		[0, 1, 2, 3] [1, 4, 2]
-// 	is stored:
-// 		[0, 1, 2, -4, 1, 4, -3]
-// The face stack stores values per face. the length of the face stack has to match the number of
-// negative values in the index layer in the corner stack. The face stack can be used to store things
-// like material index.
-//
-// Storage
-// -------
-// All data is stored in little endian byte order with no padding. The layout mirrors the structs
-// defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
-// the length of the name followed by that many characters. Termination is not stored in the file.
-// Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
-// integer a 32-bit unsigned integer is used.
-package encoding_hxa
+/*
+Implementation of the HxA 3D asset format
+HxA is an interchangeable graphics asset format.
+Designed by Eskil Steenberg. @quelsolaar / eskil 'at' obsession 'dot' se / www.quelsolaar.com
+
+Author of this Odin package: Ginger Bill
+
+Following comment is copied from the original C-implementation  
+---------  
+- Does the world need another Graphics file format?  
+Unfortunately, Yes. All existing formats are either too large and complicated to be implemented from
+scratch, or don't have some basic features needed in modern computer graphics.
+
+- Who is this format for?  
+For people who want a capable open Graphics format that can be implemented from scratch in
+a few hours. It is ideal for graphics researchers, game developers or other people who
+wants to build custom graphics pipelines. Given how easy it is to parse and write, it
+should be easy to write utilities that process assets to perform tasks like: generating
+normals, light-maps, tangent spaces, Error detection, GPU optimization, LOD generation,
+and UV mapping.
+
+- Why store images in the format when there are so many good image formats already?  
+Yes there are, but only for 2D RGB/RGBA images. A lot of computer graphics rendering rely
+on 1D, 3D, cube, multilayer, multi channel, floating point bitmap buffers. There are almost no
+formats for this kind of data. Also 3D files that reference separate image files rely on
+file paths, and this often creates issues when the assets are moved. By including the
+texture data in the files directly the assets become self contained.
+
+- Why doesn't the format support <insert whatever>?  
+Because the entire point is to make a format that can be implemented. Features like NURBSs,
+Construction history, or BSP trees would make the format too large to serve its purpose.
+The facilities of the formats to store meta data should make the format flexible enough
+for most uses. Adding HxA support should be something anyone can do in a days work.
+
+Structure:  
+----------  
+HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
+a few basic structures, and depending on how they are used they mean different things. This means
+that you can implement a tool that loads the entire file, modifies the parts it cares about and
+leaves the rest intact. It is also possible to write a tool that makes all data in the file
+editable without the need to understand its use. It is also possible for anyone to use the format
+to store auxiliary data. Anyone who wants to store data not covered by a convention can submit
+a convention to extend the format. There should never be a convention for storing the same data in
+two different ways.
+
+The data is stored in a number of nodes that are stored in an array. Each node stores an array of
+meta data. Meta data can describe anything you want, and a lot of conventions will use meta data
+to store additional information, for things like transforms, lights, shaders and animation.
+Data for Vertices, Corners, Faces, and Pixels are stored in named layer stacks. Each stack consists
+of a number of named layers. All layers in the stack have the same number of elements. Each layer
+describes one property of the primitive. Each layer can have multiple channels and each layer can
+store data of a different type.
+
+HxA stores 3 kinds of nodes
+- Pixel data.
+- Polygon geometry data.
+- Meta data only.
+
+Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
+Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
+layers to store things like color.
+The length of the layer stack is determined by the type and dimensions stored in the node.
+Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
+vertex data stores things like vertices, blend shapes, weight maps, and vertex colors. The first
+layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
+of the vertices. The corner stack describes data per corner or edge of the polygons. It can be used
+for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
+integer layer named "index" describing the vertices used to form polygons. The last value in each
+polygon has a negative - 1 index to indicate the end of the polygon.
+
+For Example:
+	A quad and a tri with the vertex index:
+		[0, 1, 2, 3] [1, 4, 2]
+	is stored:
+		[0, 1, 2, -4, 1, 4, -3]
+
+The face stack stores values per face. The length of the face stack has to match the number of
+negative values in the index layer in the corner stack. The face stack can be used to store things
+like material index.
+
+Storage:  
+-------  
+All data is stored in little endian byte order with no padding. The layout mirrors the structs
+defined below with a few exceptions. All names are stored as a 8-bit unsigned integer indicating
+the length of the name followed by that many characters. Termination is not stored in the file.
+Text strings stored in meta data are stored the same way as names, but instead of a 8-bit unsigned
+integer a 32-bit unsigned integer is used.
+*/
+package encoding_hxa

+ 5 - 4
core/encoding/uuid/doc.odin

@@ -21,8 +21,9 @@ cryptographically-secure, per RFC 9562's suggestion.
 - Version 6 without either a clock or node argument.
 - Version 7 in all cases.
 
-Here's an example of how to set up one:
-	
+Example:	
+	package main
+
 	import "core:crypto"
 	import "core:encoding/uuid"
 
@@ -40,7 +41,7 @@ Here's an example of how to set up one:
 
 
 For more information on the specifications, see here:
-- https://www.rfc-editor.org/rfc/rfc4122.html
-- https://www.rfc-editor.org/rfc/rfc9562.html
+- [[ https://www.rfc-editor.org/rfc/rfc4122.html ]]
+- [[ https://www.rfc-editor.org/rfc/rfc9562.html ]]
 */
 package uuid

+ 6 - 7
core/encoding/varint/doc.odin

@@ -1,10 +1,11 @@
 /*
-	Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
+Implementation of the LEB128 variable integer encoding as used by DWARF encoding and DEX files, among others.
 
-	Author of this Odin package: Jeroen van Rijn
+Author of this Odin package: Jeroen van Rijn
+
+Example:
+	package main
 
-	Example:
-	```odin
 	import "core:encoding/varint"
 	import "core:fmt"
 
@@ -22,7 +23,5 @@
 		assert(decoded_val == value && decode_size == encode_size && decode_err == .None)
 		fmt.printf("Decoded as %v, using %v byte%v\n", decoded_val, decode_size, "" if decode_size == 1 else "s")
 	}
-	```
-
 */
-package encoding_varint
+package encoding_varint

+ 1 - 3
core/encoding/varint/leb128.odin

@@ -6,8 +6,6 @@
 		Jeroen van Rijn: Initial implementation.
 */
 
-// package varint implements variable length integer encoding and decoding using
-// the LEB128 format as used by DWARF debug info, Android .dex and other file formats.
 package encoding_varint
 
 // In theory we should use the bigint package. In practice, varints bigger than this indicate a corrupted file.
@@ -160,4 +158,4 @@ encode_ileb128 :: proc(buf: []u8, val: i128) -> (size: int, err: Error) {
 		buf[size - 1] = u8(low)
 	}
 	return
-}
+}

+ 23 - 0
core/encoding/xml/doc.odin

@@ -0,0 +1,23 @@
+/*
+XML 1.0 / 1.1 parser
+
+A from-scratch XML implementation, loosely modelled on the [[ spec; https://www.w3.org/TR/2006/REC-xml11-20060816 ]].
+
+Features:
+- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
+- Simple to understand and use. Small.
+
+Caveats:
+- We do NOT support HTML in this package, as that may or may not be valid XML.
+  If it works, great. If it doesn't, that's not considered a bug.
+
+- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
+- <!ELEMENT and <!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
+
+MAYBE:
+- XML writer?
+- Serialize/deserialize Odin types?
+
+For a full example, see: [[ core/encoding/xml/example; https://github.com/odin-lang/Odin/tree/master/core/encoding/xml/example ]]
+*/
+package encoding_xml

+ 5 - 23
core/encoding/xml/xml_reader.odin

@@ -1,29 +1,11 @@
 /*
- XML 1.0 / 1.1 parser
+	2021-2022 Jeroen van Rijn <[email protected]>.
+	available under Odin's BSD-3 license.
 
- 2021-2022 Jeroen van Rijn <[email protected]>.
- available under Odin's BSD-3 license.
-
- from-scratch XML implementation, loosely modelled on the [spec](https://www.w3.org/TR/2006/REC-xml11-20060816).
-
-Features:
-- Supports enough of the XML 1.0/1.1 spec to handle the 99.9% of XML documents in common current usage.
-- Simple to understand and use. Small.
-
-Caveats:
-- We do NOT support HTML in this package, as that may or may not be valid XML.
-  If it works, great. If it doesn't, that's not considered a bug.
-
-- We do NOT support UTF-16. If you have a UTF-16 XML file, please convert it to UTF-8 first. Also, our condolences.
-- <[!ELEMENT and <[!ATTLIST are not supported, and will be either ignored or return an error depending on the parser options.
-
-MAYBE:
-- XML writer?
-- Serialize/deserialize Odin types?
-
-List of contributors:
-- Jeroen van Rijn: Initial implementation.
+	List of contributors:
+	- Jeroen van Rijn: Initial implementation.
 */
+
 package encoding_xml
 // An XML 1.0 / 1.1 parser
 

+ 15 - 25
core/flags/doc.odin

@@ -11,15 +11,13 @@ Command-Line Syntax:
 Arguments are treated differently depending on how they're formatted.
 The format is similar to the Odin binary's way of handling compiler flags.
 
-```
-type                  handling
-------------          ------------------------
-<positional>          depends on struct layout
--<flag>               set a bool true
--<flag:option>        set flag to option
--<flag=option>        set flag to option, alternative syntax
--<map>:<key>=<value>  set map[key] to value
-```
+	type                  handling
+	------------          ------------------------
+	<positional>          depends on struct layout
+	-<flag>               set a bool true
+	-<flag:option>        set flag to option
+	-<flag=option>        set flag to option, alternative syntax
+	-<map>:<key>=<value>  set map[key] to value
 
 
 Struct Tags:
@@ -40,11 +38,9 @@ Under the `args` tag, there are the following subtags:
 - `indistinct`: allow the setting of distinct types by their base type.
 
 `required` may be given a range specifier in the following formats:
-```
-min
-<max
-min<max
-```
+	min
+	<max
+	min<max
 
 `max` is not inclusive in this range, as noted by the less-than `<` sign, so if
 you want to require 3 and only 3 arguments in a dynamic array, you would
@@ -161,21 +157,15 @@ UNIX-style:
 This package also supports parsing arguments in a limited flavor of UNIX.
 Odin and UNIX style are mutually exclusive, and which one to be used is chosen
 at parse time.
-
-```
---flag
---flag=argument
---flag argument
---flag argument repeating-argument
-```
+	--flag
+	--flag=argument
+	--flag argument
+	--flag argument repeating-argument
 
 `-flag` may also be substituted for `--flag`.
 
 Do note that map flags are not currently supported in this parsing style.
 
-
-Example:
-
-A complete example is given in the `example` subdirectory.
+For a complete example, see: [[ core/flags/example; https://github.com/odin-lang/Odin/blob/master/core/flags/example/example.odin ]].
 */
 package flags

+ 2 - 1
core/hash/xxhash/common.odin

@@ -1,5 +1,4 @@
 /*
-	An implementation of Yann Collet's [xxhash Fast Hash Algorithm](https://cyan4973.github.io/xxHash/).
 	Copyright 2021 Jeroen van Rijn <[email protected]>.
 
 	Made available under Odin's BSD-3 license, based on the original C code.
@@ -7,6 +6,8 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation.
 */
+
+// An implementation of Yann Collet's [[ xxhash Fast Hash Algorithm; https://cyan4973.github.io/xxHash/ ]].
 package xxhash
 
 import "base:intrinsics"

+ 2 - 1
core/hash/xxhash/streaming.odin

@@ -7,6 +7,7 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation.
 */
+
 package xxhash
 
 import "core:mem"
@@ -371,4 +372,4 @@ XXH3_generate_secret :: proc(secret_buffer: []u8, custom_seed: []u8) {
 			mem_copy(&secret_buffer[segment_start], &segment, size_of(segment))
 		}
 	}
-}
+}

+ 1 - 0
core/hash/xxhash/xxhash_3.odin

@@ -7,6 +7,7 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation.
 */
+
 package xxhash
 
 import "base:intrinsics"

+ 1 - 0
core/hash/xxhash/xxhash_32.odin

@@ -7,6 +7,7 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation.
 */
+
 package xxhash
 
 import "base:intrinsics"

+ 1 - 0
core/hash/xxhash/xxhash_64.odin

@@ -7,6 +7,7 @@
 	List of contributors:
 		Jeroen van Rijn: Initial implementation.
 */
+
 package xxhash
 
 import "base:intrinsics"

+ 348 - 0
core/image/png/doc.odin

@@ -0,0 +1,348 @@
+/*
+package png implements a PNG image reader
+
+The PNG specification is at [[ https://www.w3.org/TR/PNG/ ]].
+
+Example:
+	package main
+
+	import "core:image"
+	// import "core:image/png"
+	import "core:bytes"
+	import "core:fmt"
+
+	// For PPM writer
+	import "core:mem"
+	import "core:os"
+
+	main :: proc() {
+		track := mem.Tracking_Allocator{}
+		mem.tracking_allocator_init(&track, context.allocator)
+
+		context.allocator = mem.tracking_allocator(&track)
+
+		demo()
+
+		if len(track.allocation_map) > 0 {
+			fmt.println("Leaks:")
+			for _, v in track.allocation_map {
+				fmt.printf("\t%v\n\n", v)
+			}
+		}
+	}
+
+	demo :: proc() {
+		file: string
+
+		options := image.Options{.return_metadata}
+		err:       image.Error
+		img:      ^image.Image
+
+		file = "../../../misc/logo-slim.png"
+
+		img, err = load(file, options)
+		defer destroy(img)
+
+		if err != nil {
+			fmt.printf("Trying to read PNG file %v returned %v\n", file, err)
+		} else {
+			fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth)
+
+			if v, ok := img.metadata.(^image.PNG_Info); ok {
+				// Handle ancillary chunks as you wish.
+				// We provide helper functions for a few types.
+				for c in v.chunks {
+					#partial switch c.header.type {
+					case .tIME:
+						if t, t_ok := core_time(c); t_ok {
+							fmt.printf("[tIME]: %v\n", t)
+						}
+					case .gAMA:
+						if gama, gama_ok := gamma(c); gama_ok {
+							fmt.printf("[gAMA]: %v\n", gama)
+						}
+					case .pHYs:
+						if phys, phys_ok := phys(c); phys_ok {
+							if phys.unit == .Meter {
+								xm    := f32(img.width)  / f32(phys.ppu_x)
+								ym    := f32(img.height) / f32(phys.ppu_y)
+								dpi_x, dpi_y := phys_to_dpi(phys)
+								fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y)
+								fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y)
+								fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym)
+							} else {
+								fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y)
+							}
+						}
+					case .iTXt, .zTXt, .tEXt:
+						res, ok_text := text(c)
+						if ok_text {
+							if c.header.type == .iTXt {
+								fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text)
+							} else {
+								fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text)
+							}
+						}
+						defer text_destroy(res)
+					case .bKGD:
+						fmt.printf("[bKGD] %v\n", img.background)
+					case .eXIf:
+						if res, ok_exif := exif(c); ok_exif {
+							/*
+								Other than checking the signature and byte order, we don't handle Exif data.
+								If you wish to interpret it, pass it to an Exif parser.
+							*/
+							fmt.printf("[eXIf] %v\n", res)
+						}
+					case .PLTE:
+						if plte, plte_ok := plte(c); plte_ok {
+							fmt.printf("[PLTE] %v\n", plte)
+						} else {
+							fmt.printf("[PLTE] Error\n")
+						}
+					case .hIST:
+						if res, ok_hist := hist(c); ok_hist {
+							fmt.printf("[hIST] %v\n", res)
+						}
+					case .cHRM:
+						if res, ok_chrm := chrm(c); ok_chrm {
+							fmt.printf("[cHRM] %v\n", res)
+						}
+					case .sPLT:
+						res, ok_splt := splt(c)
+						if ok_splt {
+							fmt.printf("[sPLT] %v\n", res)
+						}
+						splt_destroy(res)
+					case .sBIT:
+						if res, ok_sbit := sbit(c); ok_sbit {
+							fmt.printf("[sBIT] %v\n", res)
+						}
+					case .iCCP:
+						res, ok_iccp := iccp(c)
+						if ok_iccp {
+							fmt.printf("[iCCP] %v\n", res)
+						}
+						iccp_destroy(res)
+					case .sRGB:
+						if res, ok_srgb := srgb(c); ok_srgb {
+							fmt.printf("[sRGB] Rendering intent: %v\n", res)
+						}
+					case:
+						type := c.header.type
+						name := chunk_type_to_name(&type)
+						fmt.printf("[%v]: %v\n", name, c.data)
+					}
+				}
+			}
+		}
+
+		fmt.printf("Done parsing metadata.\n")
+
+		if err == nil && .do_not_decompress_image not_in options && .info not_in options {
+			if ok := write_image_as_ppm("out.ppm", img); ok {
+				fmt.println("Saved decoded image.")
+			} else {
+				fmt.println("Error saving out.ppm.")
+				fmt.println(img)
+			}
+		}
+	}
+
+	// Crappy PPM writer used during testing. Don't use in production.
+	write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {
+
+		_bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
+			if v, ok := bg.?; ok {
+				res = v
+			} else {
+				if high {
+					l := u16(30 * 256 + 30)
+
+					if (x & 4 == 0) ~ (y & 4 == 0) {
+						res = [3]u16{l, 0, l}
+					} else {
+						res = [3]u16{l >> 1, 0, l >> 1}
+					}
+				} else {
+					if (x & 4 == 0) ~ (y & 4 == 0) {
+						res = [3]u16{30, 30, 30}
+					} else {
+						res = [3]u16{15, 15, 15}
+					}
+				}
+			}
+			return
+		}
+
+		// profiler.timed_proc();
+		using image
+		using os
+
+		flags: int = O_WRONLY|O_CREATE|O_TRUNC
+
+		img := image
+
+		// PBM 16-bit images are big endian
+		when ODIN_ENDIAN == .Little {
+			if img.depth == 16 {
+				// The pixel components are in Big Endian. Let's byteswap back.
+				input  := mem.slice_data_cast([]u16,   img.pixels.buf[:])
+				output := mem.slice_data_cast([]u16be, img.pixels.buf[:])
+				#no_bounds_check for v, i in input {
+					output[i] = u16be(v)
+				}
+			}
+		}
+
+		pix := bytes.buffer_to_bytes(&img.pixels)
+
+		if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
+			return false
+		}
+
+		mode: int = 0
+		when ODIN_OS == .Linux || ODIN_OS == .Darwin {
+			// NOTE(justasd): 644 (owner read, write; group read; others read)
+			mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
+		}
+
+		fd, err := open(filename, flags, mode)
+		if err != nil {
+			return false
+		}
+		defer close(fd)
+
+		write_string(fd,
+			fmt.tprintf("P6\n%v %v\n%v\n", width, height, uint(1 << uint(depth) - 1)),
+		)
+
+		if channels == 3 {
+			// We don't handle transparency here...
+			write_ptr(fd, raw_data(pix), len(pix))
+		} else {
+			bpp := depth == 16 ? 2 : 1
+			bytes_needed := width * height * 3 * bpp
+
+			op := bytes.Buffer{}
+			bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed)
+			defer bytes.buffer_destroy(&op)
+
+			if channels == 1 {
+				if depth == 16 {
+					assert(len(pix) == width * height * 2)
+					p16 := mem.slice_data_cast([]u16, pix)
+					o16 := mem.slice_data_cast([]u16, op.buf[:])
+					#no_bounds_check for len(p16) != 0 {
+						r := u16(p16[0])
+						o16[0] = r
+						o16[1] = r
+						o16[2] = r
+						p16 = p16[1:]
+						o16 = o16[3:]
+					}
+				} else {
+					o := 0
+					for i := 0; i < len(pix); i += 1 {
+						r := pix[i]
+						op.buf[o  ] = r
+						op.buf[o+1] = r
+						op.buf[o+2] = r
+						o += 3
+					}
+				}
+				write_ptr(fd, raw_data(op.buf), len(op.buf))
+			} else if channels == 2 {
+				if depth == 16 {
+					p16 := mem.slice_data_cast([]u16, pix)
+					o16 := mem.slice_data_cast([]u16, op.buf[:])
+
+					bgcol := img.background
+
+					#no_bounds_check for len(p16) != 0 {
+						r  := f64(u16(p16[0]))
+						bg:   f64
+						if bgcol != nil {
+							v := bgcol.([3]u16)[0]
+							bg = f64(v)
+						}
+						a  := f64(u16(p16[1])) / 65535.0
+						l  := (a * r) + (1 - a) * bg
+
+						o16[0] = u16(l)
+						o16[1] = u16(l)
+						o16[2] = u16(l)
+
+						p16 = p16[2:]
+						o16 = o16[3:]
+					}
+				} else {
+					o := 0
+					for i := 0; i < len(pix); i += 2 {
+						r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0
+						c := u8(f32(r) * a1)
+						op.buf[o  ] = c
+						op.buf[o+1] = c
+						op.buf[o+2] = c
+						o += 3
+					}
+				}
+				write_ptr(fd, raw_data(op.buf), len(op.buf))
+			} else if channels == 4 {
+				if depth == 16 {
+					p16 := mem.slice_data_cast([]u16be, pix)
+					o16 := mem.slice_data_cast([]u16be, op.buf[:])
+
+					#no_bounds_check for len(p16) != 0 {
+
+						bg := _bg(img.background, 0, 0)
+						r     := f32(p16[0])
+						g     := f32(p16[1])
+						b     := f32(p16[2])
+						a     := f32(p16[3]) / 65535.0
+
+						lr  := (a * r) + (1 - a) * f32(bg[0])
+						lg  := (a * g) + (1 - a) * f32(bg[1])
+						lb  := (a * b) + (1 - a) * f32(bg[2])
+
+						o16[0] = u16be(lr)
+						o16[1] = u16be(lg)
+						o16[2] = u16be(lb)
+
+						p16 = p16[4:]
+						o16 = o16[3:]
+					}
+				} else {
+					o := 0
+
+					for i := 0; i < len(pix); i += 4 {
+
+						x := (i / 4)  % width
+						y := i / width / 4
+
+						_b := _bg(img.background, x, y, false)
+						bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])}
+
+						r := f32(pix[i])
+						g := f32(pix[i+1])
+						b := f32(pix[i+2])
+						a := f32(pix[i+3]) / 255.0
+
+						lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]))
+						lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]))
+						lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]))
+						op.buf[o  ] = lr
+						op.buf[o+1] = lg
+						op.buf[o+2] = lb
+						o += 3
+					}
+				}
+				write_ptr(fd, raw_data(op.buf), len(op.buf))
+			} else {
+				return false
+			}
+		}
+		return true
+	}
+*/
+package png

+ 0 - 351
core/image/png/example.odin

@@ -1,351 +0,0 @@
-/*
-	Copyright 2021 Jeroen van Rijn <[email protected]>.
-	Made available under Odin's BSD-3 license.
-
-	List of contributors:
-		Jeroen van Rijn: Initial implementation.
-		Ginger Bill:     Cosmetic changes.
-
-	An example of how to use `load`.
-*/
-//+build ignore
-package png
-
-import "core:image"
-// import "core:image/png"
-import "core:bytes"
-import "core:fmt"
-
-// For PPM writer
-import "core:mem"
-import "core:os"
-
-main :: proc() {
-	track := mem.Tracking_Allocator{}
-	mem.tracking_allocator_init(&track, context.allocator)
-
-	context.allocator = mem.tracking_allocator(&track)
-
-	demo()
-
-	if len(track.allocation_map) > 0 {
-		fmt.println("Leaks:")
-		for _, v in track.allocation_map {
-			fmt.printf("\t%v\n\n", v)
-		}
-	}
-}
-
-demo :: proc() {
-	file: string
-
-	options := image.Options{.return_metadata}
-	err:       image.Error
-	img:      ^image.Image
-
-	file = "../../../misc/logo-slim.png"
-
-	img, err = load(file, options)
-	defer destroy(img)
-
-	if err != nil {
-		fmt.printf("Trying to read PNG file %v returned %v\n", file, err)
-	} else {
-		fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth)
-
-		if v, ok := img.metadata.(^image.PNG_Info); ok {
-			// Handle ancillary chunks as you wish.
-			// We provide helper functions for a few types.
-			for c in v.chunks {
-				#partial switch c.header.type {
-				case .tIME:
-					if t, t_ok := core_time(c); t_ok {
-						fmt.printf("[tIME]: %v\n", t)
-					}
-				case .gAMA:
-					if gama, gama_ok := gamma(c); gama_ok {
-						fmt.printf("[gAMA]: %v\n", gama)
-					}
-				case .pHYs:
-					if phys, phys_ok := phys(c); phys_ok {
-						if phys.unit == .Meter {
-							xm    := f32(img.width)  / f32(phys.ppu_x)
-							ym    := f32(img.height) / f32(phys.ppu_y)
-							dpi_x, dpi_y := phys_to_dpi(phys)
-							fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y)
-							fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y)
-							fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym)
-						} else {
-							fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y)
-						}
-					}
-				case .iTXt, .zTXt, .tEXt:
-					res, ok_text := text(c)
-					if ok_text {
-						if c.header.type == .iTXt {
-							fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text)
-						} else {
-							fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text)
-						}
-					}
-					defer text_destroy(res)
-				case .bKGD:
-					fmt.printf("[bKGD] %v\n", img.background)
-				case .eXIf:
-					if res, ok_exif := exif(c); ok_exif {
-						/*
-							Other than checking the signature and byte order, we don't handle Exif data.
-							If you wish to interpret it, pass it to an Exif parser.
-						*/
-						fmt.printf("[eXIf] %v\n", res)
-					}
-				case .PLTE:
-					if plte, plte_ok := plte(c); plte_ok {
-						fmt.printf("[PLTE] %v\n", plte)
-					} else {
-						fmt.printf("[PLTE] Error\n")
-					}
-				case .hIST:
-					if res, ok_hist := hist(c); ok_hist {
-						fmt.printf("[hIST] %v\n", res)
-					}
-				case .cHRM:
-					if res, ok_chrm := chrm(c); ok_chrm {
-						fmt.printf("[cHRM] %v\n", res)
-					}
-				case .sPLT:
-					res, ok_splt := splt(c)
-					if ok_splt {
-						fmt.printf("[sPLT] %v\n", res)
-					}
-					splt_destroy(res)
-				case .sBIT:
-					if res, ok_sbit := sbit(c); ok_sbit {
-						fmt.printf("[sBIT] %v\n", res)
-					}
-				case .iCCP:
-					res, ok_iccp := iccp(c)
-					if ok_iccp {
-						fmt.printf("[iCCP] %v\n", res)
-					}
-					iccp_destroy(res)
-				case .sRGB:
-					if res, ok_srgb := srgb(c); ok_srgb {
-						fmt.printf("[sRGB] Rendering intent: %v\n", res)
-					}
-				case:
-					type := c.header.type
-					name := chunk_type_to_name(&type)
-					fmt.printf("[%v]: %v\n", name, c.data)
-				}
-			}
-		}
-	}
-
-	fmt.printf("Done parsing metadata.\n")
-
-	if err == nil && .do_not_decompress_image not_in options && .info not_in options {
-		if ok := write_image_as_ppm("out.ppm", img); ok {
-			fmt.println("Saved decoded image.")
-		} else {
-			fmt.println("Error saving out.ppm.")
-			fmt.println(img)
-		}
-	}
-}
-
-// Crappy PPM writer used during testing. Don't use in production.
-write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {
-
-	_bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
-		if v, ok := bg.?; ok {
-			res = v
-		} else {
-			if high {
-				l := u16(30 * 256 + 30)
-
-				if (x & 4 == 0) ~ (y & 4 == 0) {
-					res = [3]u16{l, 0, l}
-				} else {
-					res = [3]u16{l >> 1, 0, l >> 1}
-				}
-			} else {
-				if (x & 4 == 0) ~ (y & 4 == 0) {
-					res = [3]u16{30, 30, 30}
-				} else {
-					res = [3]u16{15, 15, 15}
-				}
-			}
-		}
-		return
-	}
-
-	// profiler.timed_proc();
-	using image
-	using os
-
-	flags: int = O_WRONLY|O_CREATE|O_TRUNC
-
-	img := image
-
-	// PBM 16-bit images are big endian
-	when ODIN_ENDIAN == .Little {
-		if img.depth == 16 {
-			// The pixel components are in Big Endian. Let's byteswap back.
-			input  := mem.slice_data_cast([]u16,   img.pixels.buf[:])
-			output := mem.slice_data_cast([]u16be, img.pixels.buf[:])
-			#no_bounds_check for v, i in input {
-				output[i] = u16be(v)
-			}
-		}
-	}
-
-	pix := bytes.buffer_to_bytes(&img.pixels)
-
-	if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
-		return false
-	}
-
-	mode: int = 0
-	when ODIN_OS == .Linux || ODIN_OS == .Darwin {
-		// NOTE(justasd): 644 (owner read, write; group read; others read)
-		mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
-	}
-
-	fd, err := open(filename, flags, mode)
-	if err != nil {
-		return false
-	}
-	defer close(fd)
-
-	write_string(fd,
-		fmt.tprintf("P6\n%v %v\n%v\n", width, height, uint(1 << uint(depth) - 1)),
-	)
-
-	if channels == 3 {
-		// We don't handle transparency here...
-		write_ptr(fd, raw_data(pix), len(pix))
-	} else {
-		bpp := depth == 16 ? 2 : 1
-		bytes_needed := width * height * 3 * bpp
-
-		op := bytes.Buffer{}
-		bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed)
-		defer bytes.buffer_destroy(&op)
-
-		if channels == 1 {
-			if depth == 16 {
-				assert(len(pix) == width * height * 2)
-				p16 := mem.slice_data_cast([]u16, pix)
-				o16 := mem.slice_data_cast([]u16, op.buf[:])
-				#no_bounds_check for len(p16) != 0 {
-					r := u16(p16[0])
-					o16[0] = r
-					o16[1] = r
-					o16[2] = r
-					p16 = p16[1:]
-					o16 = o16[3:]
-				}
-			} else {
-				o := 0
-				for i := 0; i < len(pix); i += 1 {
-					r := pix[i]
-					op.buf[o  ] = r
-					op.buf[o+1] = r
-					op.buf[o+2] = r
-					o += 3
-				}
-			}
-			write_ptr(fd, raw_data(op.buf), len(op.buf))
-		} else if channels == 2 {
-			if depth == 16 {
-				p16 := mem.slice_data_cast([]u16, pix)
-				o16 := mem.slice_data_cast([]u16, op.buf[:])
-
-				bgcol := img.background
-
-				#no_bounds_check for len(p16) != 0 {
-					r  := f64(u16(p16[0]))
-					bg:   f64
-					if bgcol != nil {
-						v := bgcol.([3]u16)[0]
-						bg = f64(v)
-					}
-					a  := f64(u16(p16[1])) / 65535.0
-					l  := (a * r) + (1 - a) * bg
-
-					o16[0] = u16(l)
-					o16[1] = u16(l)
-					o16[2] = u16(l)
-
-					p16 = p16[2:]
-					o16 = o16[3:]
-				}
-			} else {
-				o := 0
-				for i := 0; i < len(pix); i += 2 {
-					r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0
-					c := u8(f32(r) * a1)
-					op.buf[o  ] = c
-					op.buf[o+1] = c
-					op.buf[o+2] = c
-					o += 3
-				}
-			}
-			write_ptr(fd, raw_data(op.buf), len(op.buf))
-		} else if channels == 4 {
-			if depth == 16 {
-				p16 := mem.slice_data_cast([]u16be, pix)
-				o16 := mem.slice_data_cast([]u16be, op.buf[:])
-
-				#no_bounds_check for len(p16) != 0 {
-
-					bg := _bg(img.background, 0, 0)
-					r     := f32(p16[0])
-					g     := f32(p16[1])
-					b     := f32(p16[2])
-					a     := f32(p16[3]) / 65535.0
-
-					lr  := (a * r) + (1 - a) * f32(bg[0])
-					lg  := (a * g) + (1 - a) * f32(bg[1])
-					lb  := (a * b) + (1 - a) * f32(bg[2])
-
-					o16[0] = u16be(lr)
-					o16[1] = u16be(lg)
-					o16[2] = u16be(lb)
-
-					p16 = p16[4:]
-					o16 = o16[3:]
-				}
-			} else {
-				o := 0
-
-				for i := 0; i < len(pix); i += 4 {
-
-					x := (i / 4)  % width
-					y := i / width / 4
-
-					_b := _bg(img.background, x, y, false)
-					bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])}
-
-					r := f32(pix[i])
-					g := f32(pix[i+1])
-					b := f32(pix[i+2])
-					a := f32(pix[i+3]) / 255.0
-
-					lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]))
-					lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]))
-					lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]))
-					op.buf[o  ] = lr
-					op.buf[o+1] = lg
-					op.buf[o+2] = lb
-					o += 3
-				}
-			}
-			write_ptr(fd, raw_data(op.buf), len(op.buf))
-		} else {
-			return false
-		}
-	}
-	return true
-}

+ 1 - 0
core/image/png/helpers.odin

@@ -8,6 +8,7 @@
 
 	These are a few useful utility functions to work with PNG images.
 */
+
 package png
 
 import "core:image"

+ 1 - 4
core/image/png/png.odin

@@ -8,9 +8,6 @@
 */
 
 
-// package png implements a PNG image reader
-//
-// The PNG specification is at https://www.w3.org/TR/PNG/.
 //+vet !using-stmt
 package png
 
@@ -1619,4 +1616,4 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
 @(init, private)
 _register :: proc() {
 	image.register(.PNG, load_from_bytes, destroy)
-}
+}

+ 1 - 1
core/image/qoi/qoi.odin

@@ -9,7 +9,7 @@
 
 // package qoi implements a QOI image reader
 //
-// The QOI specification is at https://qoiformat.org.
+// The QOI specification is at [[ https://qoiformat.org ]].
 package qoi
 
 import "core:image"

+ 3 - 3
core/math/noise/opensimplex2.odin

@@ -1,8 +1,8 @@
 /*
 	OpenSimplex2 noise implementation.
 
-	Ported from https://github.com/KdotJPG/OpenSimplex2.
-	Copyright 2022 Yuki2 (https://github.com/NoahR02)
+	Ported from [[ https://github.com/KdotJPG/OpenSimplex2 ]].
+	Copyright 2022 Yuki2 [[ https://github.com/NoahR02 ]]
 */
 package math_noise
 
@@ -177,4 +177,4 @@ noise_4d_fallback :: proc(seed: i64, coord: Vec4) -> (value: f32) {
 	// Get points for A4 lattice
 	skew := f64(SKEW_4D) * (coord.x + coord.y + coord.z + coord.w)
 	return _internal_noise_4d_unskewed_base(seed, coord + skew)
-}
+}

+ 30 - 32
core/net/doc.odin

@@ -13,36 +13,34 @@
 */
 
 /*
-	Package net implements cross-platform Berkeley Sockets, DNS resolution and associated procedures.
-	For other protocols and their features, see subdirectories of this package.
-
-	Features:
-		- Supports Windows, Linux and OSX.
-		- Opening and closing of TCP and UDP sockets.
-		- Sending to and receiving from these sockets.
-		- DNS name lookup, using either the OS or our own resolver.
-
-	Planned:
-		- Nonblocking IO
-		- `Connection` struct
-			A "fat socket" struct that remembers how you opened it, etc, instead of just being a handle.
-		- IP Range structs, CIDR/class ranges, netmask calculator and associated helper procedures.
-		- Use `context.temp_allocator` instead of stack-based arenas?
-			And check it's the default temp allocator or can give us 4 MiB worth of memory
-			without punting to the main allocator by comparing their addresses in an @(init) procedure.
-			Panic if this assumption is not met.
-
-		- Document assumptions about libc usage (or avoidance thereof) for each platform.
-
-	Assumptions:
-		- For performance reasons this package relies on the `context.temp_allocator` in some places.
-
-		  You can replace the default `context.temp_allocator` with your own as long as it meets
-		  this requirement: A minimum of 4 MiB of scratch space that's expected not to be freed.
-
-		  If this expectation is not met, the package's @(init) procedure will attempt to detect
-		  this and panic to avoid temp allocations prematurely overwriting data and garbling results,
-		  or worse. This means that should you replace the temp allocator with an insufficient one,
-		  we'll do our best to loudly complain the first time you try it.
+Package net implements cross-platform Berkeley Sockets, DNS resolution and associated procedures.
+For other protocols and their features, see subdirectories of this package.
+
+Features:
+- Supports Windows, Linux and OSX.
+- Opening and closing of TCP and UDP sockets.
+- Sending to and receiving from these sockets.
+- DNS name lookup, using either the OS or our own resolver.
+
+Planned:
+- Nonblocking IO
+- `Connection` struct; A "fat socket" struct that remembers how you opened it, etc, instead of just being a handle.
+- IP Range structs, CIDR/class ranges, netmask calculator and associated helper procedures.
+- Use `context.temp_allocator` instead of stack-based arenas?  
+And check it's the default temp allocator or can give us 4 MiB worth of memory
+without punting to the main allocator by comparing their addresses in an @(init) procedure.
+Panic if this assumption is not met.
+- Document assumptions about libc usage (or avoidance thereof) for each platform.
+
+Assumptions:
+For performance reasons this package relies on the `context.temp_allocator` in some places.  
+
+You can replace the default `context.temp_allocator` with your own as long as it meets
+this requirement: A minimum of 4 MiB of scratch space that's expected not to be freed.
+
+If this expectation is not met, the package's @(init) procedure will attempt to detect
+this and panic to avoid temp allocations prematurely overwriting data and garbling results,
+or worse. This means that should you replace the temp allocator with an insufficient one,
+we'll do our best to loudly complain the first time you try it.
 */
-package net
+package net

+ 3 - 0
core/prof/spall/doc.odin

@@ -1,4 +1,7 @@
 /*
+Example:
+	package main
+
 	import "base:runtime"
 	import "core:prof/spall"
 	import "core:sync"

+ 3 - 3
core/sync/doc.odin

@@ -7,8 +7,8 @@ synchronize threads' access to shared memory.
 To limit or control the threads' access to shared memory typically the
 following approaches are used:
 
-* Locks
-* Lock-free
+- Locks
+- Lock-free
 
 When using locks, sections of the code that access shared memory (also known as
 **critical sections**) are guarded by locks, allowing limited access to threads
@@ -18,4 +18,4 @@ In lock-free programming the data itself is organized in such a way that threads
 don't intervene much. It can be done via segmenting the data between threads,
 and/or by using atomic operations.
 */
-package sync
+package sync

+ 8 - 1
core/sys/info/doc.odin

@@ -2,6 +2,12 @@
 Copyright 2022 Jeroen van Rijn <[email protected]>.
 Made available under Odin's BSD-3 license.
 
+List of contributors:
+	Jeroen van Rijn: Initial implementation.
+	Laytan: ARM and RISC-V CPU feature detection.
+*/
+
+/*
 Package `core:sys/info` gathers system information on:
 Windows, Linux, macOS, FreeBSD & OpenBSD.
 
@@ -11,9 +17,10 @@ and CPU information.
 On Windows, GPUs will also be enumerated using the registry.
 
 CPU feature flags can be tested against `cpu_features`, where applicable, e.g.
-`if .aes in si.aes { ... }`
+`if .aes in info.cpu_features.? { ... }`
 
 Example:
+	package main
 
 	import "core:fmt"
 	import si "core:sys/info"

+ 1 - 0
core/sys/llvm/bit_manipulation.odin

@@ -1,4 +1,5 @@
 // Bit Manipulation Intrinsics
+
 package sys_llvm
 
 /*

+ 1 - 0
core/sys/llvm/code_generator.odin

@@ -1,4 +1,5 @@
 // Code Generator Intrinsics
+
 package sys_llvm
 
 @(default_calling_convention="none")

+ 1 - 0
core/sys/llvm/standard_c_library.odin

@@ -1,4 +1,5 @@
 // Standard C Library Intrinsics
+
 package sys_llvm
 
 @(default_calling_convention="none")

+ 1 - 1
core/sys/posix/unistd.odin

@@ -112,7 +112,7 @@ foreign lib {
 	Return configuration-defined string values.
 	Its use and purpose are similar to sysconf(), but it is used where string values rather than numeric values are returned.
 
-	Returns: 0 (setting errno) if `name` is invalid, need `buf` `len` if buf is `nil`, amount of bytes added to buf otherwise
+	Returns: 0 (setting errno) if `name` is invalid, need `buf` of `len` bytes if `buf` is `nil`, amount of bytes added to buf otherwise
 
 	[[ More; https://pubs.opengroup.org/onlinepubs/9699919799/functions/confstr.html ]]
 	*/

+ 4 - 5
core/text/edit/text_edit.odin

@@ -1,10 +1,9 @@
-package text_edit
-
 /*
-	Based off the articles by rxi:
-		* https://rxi.github.io/textbox_behaviour.html
-		* https://rxi.github.io/a_simple_undo_system.html
+Based off the articles by rxi:
+- [[ https://rxi.github.io/textbox_behaviour.html ]]
+- [[ https://rxi.github.io/a_simple_undo_system.html ]]
 */
+package text_edit
 
 import "base:runtime"
 import "core:time"

+ 10 - 19
core/text/table/doc.odin

@@ -1,8 +1,8 @@
 /*
 The package `table` implements plain-text/markdown/HTML/custom rendering of tables.
 
-**Custom rendering example:**
-
+**Custom rendering.**
+Example:
 	package main
 
 	import "core:io"
@@ -24,13 +24,12 @@ The package `table` implements plain-text/markdown/HTML/custom rendering of tabl
 		}
 	}
 
-This outputs:
-
+Output:
 	A_LONG_ENUM         = 54, // A comment about A_LONG_ENUM
 	AN_EVEN_LONGER_ENUM = 1,  // A comment about AN_EVEN_LONGER_ENUM
 
-**Plain-text rendering example:**
-
+**Plain-text rendering.**
+Example:
 	package main
 
 	import "core:fmt"
@@ -81,8 +80,7 @@ This outputs:
 		table.write_markdown_table(stdout, tbl)
 	}
 
-This outputs:
-
+Output:
 	+-----------------------------------------------+
 	|  This is a table caption and it is very long  |
 	+------------------+-----------------+----------+
@@ -93,16 +91,12 @@ This outputs:
 	|        a         | bbb             | c        |
 	+------------------+-----------------+----------+
 
-and
-
 	|    AAAAAAAAA     |        B        |    C     |
 	|:-----------------|:---------------:|---------:|
 	| 123              | foo             |          |
 	| 000000005        | 6.283185        |          |
 	| a                | bbb             | c        |
 
-respectively.
-
 
 Additionally, if you want to set the alignment and values in-line while
 constructing a table, you can use `aligned_row_of_values` or
@@ -116,8 +110,7 @@ constructing a table, you can use `aligned_row_of_values` or
 If you only need to build a table once but display it potentially many times,
 it may be more efficient to cache the results of your write into a string.
 
-Here's an example of how you can do that:
-
+Example:
 	package main
 
 	import "core:fmt"
@@ -191,8 +184,7 @@ This package makes use of the `grapheme_count` procedure from the
 implementation for counting graphemes and calculating visual width of a Unicode
 grapheme cluster in monospace cells.
 
-Here is a full example of how well-supported Unicode is with this package:
-
+Example:
 	package main
 
 	import "core:fmt"
@@ -237,7 +229,7 @@ Here is a full example of how well-supported Unicode is with this package:
 		scripts(stdout)
 	}
 
-This will print out:
+Output:
 
 	+----------------------------------------------------------------------------------------------------------------------------+
 	|                                                        Tést Suite                                                          |
@@ -271,8 +263,7 @@ If you'd prefer to change the borders used by the plain-text table printing,
 there is the `write_decorated_table` procedure that allows you to change the
 corners and dividers.
 
-Here is a complete example:
-
+Example:
 	package main
 
 	import "core:fmt"