浏览代码

Bump zeroidc dependencies (#1847)

openidconnect -> 2.5
base64 -> 0.21
url -> 2.3
bytes -> 1.3
Grant Limberg 2 年之前
父节点
当前提交
a59626c971
共有 100 个文件被更改,包括 13952 次插入和 2463 次删除
  1. 107 29
      zeroidc/Cargo.lock
  2. 4 4
      zeroidc/Cargo.toml
  3. 0 0
      zeroidc/vendor/base64-0.13.0/.cargo-checksum.json
  4. 826 0
      zeroidc/vendor/base64-0.13.0/Cargo.lock
  5. 43 0
      zeroidc/vendor/base64-0.13.0/Cargo.toml
  6. 201 0
      zeroidc/vendor/base64-0.13.0/LICENSE-APACHE
  7. 21 0
      zeroidc/vendor/base64-0.13.0/LICENSE-MIT
  8. 114 0
      zeroidc/vendor/base64-0.13.0/README.md
  9. 105 0
      zeroidc/vendor/base64-0.13.0/RELEASE-NOTES.md
  10. 210 0
      zeroidc/vendor/base64-0.13.0/benches/benchmarks.rs
  11. 89 0
      zeroidc/vendor/base64-0.13.0/examples/base64.rs
  12. 0 0
      zeroidc/vendor/base64-0.13.0/examples/make_tables.rs
  13. 34 0
      zeroidc/vendor/base64-0.13.0/icon_CLion.svg
  14. 247 0
      zeroidc/vendor/base64-0.13.0/src/chunked_encoder.rs
  15. 873 0
      zeroidc/vendor/base64-0.13.0/src/decode.rs
  16. 88 0
      zeroidc/vendor/base64-0.13.0/src/display.rs
  17. 675 0
      zeroidc/vendor/base64-0.13.0/src/encode.rs
  18. 245 0
      zeroidc/vendor/base64-0.13.0/src/lib.rs
  19. 282 0
      zeroidc/vendor/base64-0.13.0/src/read/decoder.rs
  20. 335 0
      zeroidc/vendor/base64-0.13.0/src/read/decoder_tests.rs
  21. 6 0
      zeroidc/vendor/base64-0.13.0/src/read/mod.rs
  22. 0 0
      zeroidc/vendor/base64-0.13.0/src/tables.rs
  23. 81 0
      zeroidc/vendor/base64-0.13.0/src/tests.rs
  24. 381 0
      zeroidc/vendor/base64-0.13.0/src/write/encoder.rs
  25. 176 0
      zeroidc/vendor/base64-0.13.0/src/write/encoder_string_writer.rs
  26. 568 0
      zeroidc/vendor/base64-0.13.0/src/write/encoder_tests.rs
  27. 8 0
      zeroidc/vendor/base64-0.13.0/src/write/mod.rs
  28. 0 0
      zeroidc/vendor/base64-0.13.0/tests/decode.rs
  29. 105 0
      zeroidc/vendor/base64-0.13.0/tests/encode.rs
  30. 0 0
      zeroidc/vendor/base64-0.13.0/tests/helpers.rs
  31. 194 0
      zeroidc/vendor/base64-0.13.0/tests/tests.rs
  32. 0 0
      zeroidc/vendor/base64/.cargo-checksum.json
  33. 400 415
      zeroidc/vendor/base64/Cargo.lock
  34. 34 13
      zeroidc/vendor/base64/Cargo.toml
  35. 83 43
      zeroidc/vendor/base64/README.md
  36. 127 7
      zeroidc/vendor/base64/RELEASE-NOTES.md
  37. 93 61
      zeroidc/vendor/base64/benches/benchmarks.rs
  38. 1 0
      zeroidc/vendor/base64/clippy.toml
  39. 25 25
      zeroidc/vendor/base64/examples/base64.rs
  40. 241 0
      zeroidc/vendor/base64/src/alphabet.rs
  41. 46 62
      zeroidc/vendor/base64/src/chunked_encoder.rs
  42. 172 696
      zeroidc/vendor/base64/src/decode.rs
  43. 15 15
      zeroidc/vendor/base64/src/display.rs
  44. 169 356
      zeroidc/vendor/base64/src/encode.rs
  45. 348 0
      zeroidc/vendor/base64/src/engine/general_purpose/decode.rs
  46. 161 0
      zeroidc/vendor/base64/src/engine/general_purpose/decode_suffix.rs
  47. 349 0
      zeroidc/vendor/base64/src/engine/general_purpose/mod.rs
  48. 410 0
      zeroidc/vendor/base64/src/engine/mod.rs
  49. 219 0
      zeroidc/vendor/base64/src/engine/naive.rs
  50. 1430 0
      zeroidc/vendor/base64/src/engine/tests.rs
  51. 120 186
      zeroidc/vendor/base64/src/lib.rs
  52. 19 0
      zeroidc/vendor/base64/src/prelude.rs
  53. 47 34
      zeroidc/vendor/base64/src/read/decoder.rs
  54. 52 41
      zeroidc/vendor/base64/src/read/decoder_tests.rs
  55. 58 22
      zeroidc/vendor/base64/src/tests.rs
  56. 53 27
      zeroidc/vendor/base64/src/write/encoder.rs
  57. 29 27
      zeroidc/vendor/base64/src/write/encoder_string_writer.rs
  58. 72 86
      zeroidc/vendor/base64/src/write/encoder_tests.rs
  59. 5 2
      zeroidc/vendor/base64/src/write/mod.rs
  60. 5 50
      zeroidc/vendor/base64/tests/encode.rs
  61. 48 81
      zeroidc/vendor/base64/tests/tests.rs
  62. 0 0
      zeroidc/vendor/bytes/.cargo-checksum.json
  63. 48 0
      zeroidc/vendor/bytes/CHANGELOG.md
  64. 26 10
      zeroidc/vendor/bytes/Cargo.toml
  65. 1 1
      zeroidc/vendor/bytes/benches/buf.rs
  66. 1 0
      zeroidc/vendor/bytes/benches/bytes.rs
  67. 5 5
      zeroidc/vendor/bytes/ci/miri.sh
  68. 1 0
      zeroidc/vendor/bytes/clippy.toml
  69. 318 0
      zeroidc/vendor/bytes/src/buf/buf_impl.rs
  70. 350 0
      zeroidc/vendor/bytes/src/buf/buf_mut.rs
  71. 1 2
      zeroidc/vendor/bytes/src/buf/chain.rs
  72. 31 1
      zeroidc/vendor/bytes/src/buf/uninit_slice.rs
  73. 183 51
      zeroidc/vendor/bytes/src/bytes.rs
  74. 272 58
      zeroidc/vendor/bytes/src/bytes_mut.rs
  75. 3 3
      zeroidc/vendor/bytes/src/fmt/debug.rs
  76. 2 2
      zeroidc/vendor/bytes/src/loom.rs
  77. 168 7
      zeroidc/vendor/bytes/tests/test_bytes.rs
  78. 30 2
      zeroidc/vendor/bytes/tests/test_bytes_odd_alloc.rs
  79. 103 39
      zeroidc/vendor/bytes/tests/test_bytes_vec_alloc.rs
  80. 22 0
      zeroidc/vendor/bytes/tests/test_chain.rs
  81. 0 0
      zeroidc/vendor/darling/.cargo-checksum.json
  82. 194 0
      zeroidc/vendor/darling/CHANGELOG.md
  83. 92 0
      zeroidc/vendor/darling/Cargo.lock
  84. 42 0
      zeroidc/vendor/darling/Cargo.toml
  85. 21 0
      zeroidc/vendor/darling/LICENSE
  86. 114 0
      zeroidc/vendor/darling/README.md
  87. 2 0
      zeroidc/vendor/darling/clippy.toml
  88. 73 0
      zeroidc/vendor/darling/examples/automatic_bounds.rs
  89. 174 0
      zeroidc/vendor/darling/examples/consume_fields.rs
  90. 85 0
      zeroidc/vendor/darling/examples/fallible_read.rs
  91. 80 0
      zeroidc/vendor/darling/examples/shorthand_or_long_field.rs
  92. 61 0
      zeroidc/vendor/darling/examples/supports_struct.rs
  93. 107 0
      zeroidc/vendor/darling/src/lib.rs
  94. 96 0
      zeroidc/vendor/darling/src/macros_public.rs
  95. 102 0
      zeroidc/vendor/darling/tests/accrue_errors.rs
  96. 42 0
      zeroidc/vendor/darling/tests/computed_bound.rs
  97. 25 0
      zeroidc/vendor/darling/tests/custom_bound.rs
  98. 123 0
      zeroidc/vendor/darling/tests/defaults.rs
  99. 90 0
      zeroidc/vendor/darling/tests/enums_newtype.rs
  100. 15 0
      zeroidc/vendor/darling/tests/enums_struct.rs

+ 107 - 29
zeroidc/Cargo.lock

@@ -34,6 +34,12 @@ version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
 
+[[package]]
+name = "base64"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
+
 [[package]]
 name = "bitflags"
 version = "1.3.2"
@@ -57,9 +63,9 @@ checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3"
 
 [[package]]
 name = "bytes"
-version = "1.1.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
+checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
 
 [[package]]
 name = "cbindgen"
@@ -114,7 +120,7 @@ dependencies = [
  "ansi_term",
  "atty",
  "bitflags",
- "strsim",
+ "strsim 0.8.0",
  "textwrap",
  "unicode-width",
  "vec_map",
@@ -155,6 +161,41 @@ dependencies = [
  "typenum",
 ]
 
+[[package]]
+name = "darling"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim 0.10.0",
+ "syn",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "digest"
 version = "0.10.3"
@@ -213,11 +254,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
 
 [[package]]
 name = "form_urlencoded"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"
+checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
 dependencies = [
- "matches",
  "percent-encoding",
 ]
 
@@ -415,13 +455,18 @@ dependencies = [
  "tokio-native-tls",
 ]
 
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
 [[package]]
 name = "idna"
-version = "0.2.3"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
+checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
 dependencies = [
- "matches",
  "unicode-bidi",
  "unicode-normalization",
 ]
@@ -480,7 +525,7 @@ name = "jwt"
 version = "0.16.0"
 source = "git+https://github.com/glimberg/rust-jwt#61a9291fdeec747c6edf14f4fa0caf235136c168"
 dependencies = [
- "base64",
+ "base64 0.13.0",
  "crypto-common",
  "digest",
  "hmac",
@@ -510,12 +555,6 @@ dependencies = [
  "cfg-if",
 ]
 
-[[package]]
-name = "matches"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f"
-
 [[package]]
 name = "memchr"
 version = "2.5.0"
@@ -609,11 +648,11 @@ dependencies = [
 
 [[package]]
 name = "oauth2"
-version = "4.2.0"
+version = "4.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3bd7d544f02ae0fa9e06137962703d043870d7ad6e6d44786d6a5f20679b2c9"
+checksum = "eeaf26a72311c087f8c5ba617c96fac67a5c04f430e716ac8d8ab2de62e23368"
 dependencies = [
- "base64",
+ "base64 0.13.0",
  "chrono",
  "getrandom",
  "http",
@@ -635,11 +674,11 @@ checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225"
 
 [[package]]
 name = "openidconnect"
-version = "2.3.1"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32f73e47a1766acd7bedd605742a1a2651c111f34ed3e0be117d8651432d509c"
+checksum = "32a0f47b0f1499d08c4a8480c963d49c5ec77f4249c2b6869780979415f45809"
 dependencies = [
- "base64",
+ "base64 0.13.0",
  "chrono",
  "http",
  "itertools",
@@ -653,6 +692,9 @@ dependencies = [
  "serde_derive",
  "serde_json",
  "serde_path_to_error",
+ "serde_plain",
+ "serde_with",
+ "subtle",
  "thiserror",
  "url",
 ]
@@ -713,9 +755,9 @@ dependencies = [
 
 [[package]]
 name = "percent-encoding"
-version = "2.1.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "pin-project-lite"
@@ -813,7 +855,7 @@ version = "0.11.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb"
 dependencies = [
- "base64",
+ "base64 0.13.0",
  "bytes",
  "encoding_rs",
  "futures-core",
@@ -947,6 +989,15 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_plain"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"
@@ -959,6 +1010,28 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_with"
+version = "1.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
+dependencies = [
+ "serde",
+ "serde_with_macros",
+]
+
+[[package]]
+name = "serde_with_macros"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082"
+dependencies = [
+ "darling",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "sha2"
 version = "0.10.2"
@@ -998,6 +1071,12 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
 
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
 [[package]]
 name = "subtle"
 version = "2.4.1"
@@ -1213,13 +1292,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
 
 [[package]]
 name = "url"
-version = "2.2.2"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c"
+checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
 dependencies = [
  "form_urlencoded",
  "idna",
- "matches",
  "percent-encoding",
  "serde",
 ]
@@ -1475,7 +1553,7 @@ dependencies = [
 name = "zeroidc"
 version = "0.1.0"
 dependencies = [
- "base64",
+ "base64 0.21.0",
  "bytes",
  "cbindgen",
  "jwt",

+ 4 - 4
zeroidc/Cargo.toml

@@ -9,14 +9,14 @@ publish = false
 crate-type = ["staticlib","rlib"]
 
 [dependencies]
-openidconnect = { version = "2.2", default-features = false, features = ["reqwest", "native-tls", "accept-rfc3339-timestamps"] }
-base64 = "0.13"
-url = "2.2"
+openidconnect = { version = "2.5", default-features = false, features = ["reqwest", "native-tls", "accept-rfc3339-timestamps"] }
+base64 = "0.21"
+url = "2.3"
 reqwest = "0.11"
 jwt = {version = "0.16", git = "https://github.com/glimberg/rust-jwt"}
 serde = "1.0"
 time = { version = "0.3", features = ["formatting"] }
-bytes = "1.1"
+bytes = "1.3"
 thiserror = "1"
 tokio = { version = ">=1.24" }
 

文件差异内容过多而无法显示
+ 0 - 0
zeroidc/vendor/base64-0.13.0/.cargo-checksum.json


+ 826 - 0
zeroidc/vendor/base64-0.13.0/Cargo.lock

@@ -0,0 +1,826 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "ansi_term"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "autocfg"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "autocfg"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "base64"
+version = "0.13.0"
+dependencies = [
+ "criterion 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "structopt 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "bstr"
+version = "0.2.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "byteorder"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cast"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "clap"
+version = "2.33.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "criterion"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "criterion-plot 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "oorandom 11.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "plotters 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tinytemplate 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "csv"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bstr 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "either"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "heck"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itertools"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "js-sys"
+version = "0.3.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "log"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "maybe-uninit"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memchr"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memoffset"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "oorandom"
+version = "11.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "plotters"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "web-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro-error-attr 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+ "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand_hc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_isaac"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_jitter"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_os"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex"
+version = "1.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "serde"
+version = "1.0.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "serde_derive"
+version = "1.0.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "strsim"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "structopt"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "structopt-derive 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "structopt-derive"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-error 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-segmentation"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "vec_map"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "version_check"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "walkdir"
+version = "2.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen-macro 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bumpalo 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen-macro-support 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen-backend 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "web-sys"
+version = "0.3.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
+"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+"checksum bstr 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931"
+"checksum bumpalo 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820"
+"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+"checksum cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0"
+"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+"checksum clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"
+"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
+"checksum criterion 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "63f696897c88b57f4ffe3c69d8e1a0613c7d0e6c4833363c8560fbde9c47b966"
+"checksum criterion-plot 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d"
+"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
+"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
+"checksum crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
+"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
+"checksum csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279"
+"checksum csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
+"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205"
+"checksum hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
+"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
+"checksum itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
+"checksum js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)" = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73"
+"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+"checksum libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)" = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10"
+"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
+"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
+"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
+"checksum memoffset 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f"
+"checksum num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
+"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+"checksum oorandom 11.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c"
+"checksum plotters 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb"
+"checksum proc-macro-error 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+"checksum proc-macro-error-attr 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+"checksum proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12"
+"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
+"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
+"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
+"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
+"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
+"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+"checksum rayon 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080"
+"checksum rayon-core 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280"
+"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+"checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
+"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
+"checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
+"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
+"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+"checksum serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)" = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3"
+"checksum serde_derive 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)" = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e"
+"checksum serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)" = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c"
+"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+"checksum structopt 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc388d94ffabf39b5ed5fadddc40147cb21e605f53db6f8f36a625d27489ac5"
+"checksum structopt-derive 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5e2513111825077552a6751dfad9e11ce0fba07d7276a3943a037d7e93e64c5f"
+"checksum syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)" = "4cdb98bcb1f9d81d07b536179c269ea15999b5d14ea958196413869445bb5250"
+"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+"checksum tinytemplate 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f"
+"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0"
+"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+"checksum version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
+"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
+"checksum wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c"
+"checksum wasm-bindgen-backend 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0"
+"checksum wasm-bindgen-macro 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2"
+"checksum wasm-bindgen-macro-support 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556"
+"checksum wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092"
+"checksum web-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)" = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47"
+"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

+ 43 - 0
zeroidc/vendor/base64-0.13.0/Cargo.toml

@@ -0,0 +1,43 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "base64"
+version = "0.13.0"
+authors = ["Alice Maz <[email protected]>", "Marshall Pierce <[email protected]>"]
+description = "encodes and decodes base64 as bytes or utf8"
+documentation = "https://docs.rs/base64"
+readme = "README.md"
+keywords = ["base64", "utf8", "encode", "decode", "no_std"]
+categories = ["encoding"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/marshallpierce/rust-base64"
+[profile.bench]
+debug = true
+
+[[bench]]
+name = "benchmarks"
+harness = false
+[dev-dependencies.criterion]
+version = "=0.3.2"
+
+[dev-dependencies.rand]
+version = "0.6.1"
+
+[dev-dependencies.structopt]
+version = "0.3"
+
+[features]
+alloc = []
+default = ["std"]
+std = []

+ 201 - 0
zeroidc/vendor/base64-0.13.0/LICENSE-APACHE

@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 21 - 0
zeroidc/vendor/base64-0.13.0/LICENSE-MIT

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Alice Maz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 114 - 0
zeroidc/vendor/base64-0.13.0/README.md

@@ -0,0 +1,114 @@
+[base64](https://crates.io/crates/base64)
+===
+
+[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![Build](https://travis-ci.org/marshallpierce/rust-base64.svg?branch=master)](https://travis-ci.org/marshallpierce/rust-base64) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
+
+<a href="https://www.jetbrains.com/?from=rust-base64"><img src="/icon_CLion.svg" height="40px"/></a>
+
+Made with CLion. Thanks to JetBrains for supporting open source!
+
+It's base64. What more could anyone want?
+
+This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at multiple levels of abstraction so you can choose the level of convenience vs performance that you want, e.g. `decode_config_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input), whereas `decode_config` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is slower (although still fast enough for almost any purpose) at 2.1 GiB/s.
+
+Example
+---
+
+```rust
+extern crate base64;
+
+use base64::{encode, decode};
+
+fn main() {
+    let a = b"hello world";
+    let b = "aGVsbG8gd29ybGQ=";
+
+    assert_eq!(encode(a), b);
+    assert_eq!(a, &decode(b).unwrap()[..]);
+}
+```
+
+See the [docs](https://docs.rs/base64) for all the details.
+
+Rust version compatibility
+---
+
+The minimum required Rust version is 1.34.0.
+
+Developing
+---
+
+Benchmarks are in `benches/`. Running them requires nightly rust, but `rustup` makes it easy:
+
+```bash
+rustup run nightly cargo bench
+```
+
+Decoding is aided by some pre-calculated tables, which are generated by:
+
+```bash
+cargo run --example make_tables > src/tables.rs.tmp && mv src/tables.rs.tmp src/tables.rs
+```
+
+no_std
+---
+
+This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate the `default-features` to target core instead. In that case you lose out on all the functionality revolving around `std::io`, `std::error::Error` and heap allocations. There is an additional `alloc` feature that you can activate to bring back the support for heap allocations.
+
+Profiling
+---
+
+On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the benchmarks with `rustup run nightly cargo bench --no-run`.
+
+Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual full path with `rustup run nightly cargo bench -v`; it will print out the commands it runs. If you use the exact path that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate).
+
+```bash
+sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse
+```
+
+Then analyze the results, again with perf:
+
+```bash
+sudo perf annotate -l
+```
+
+You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that 4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as it seems due to a phenomenon called *skid*. Basically, a consequence of how fancy modern CPUs are is that this sort of instruction profiling is inherently inaccurate, especially in branch-heavy code.
+
+```text
+ lib.rs:322    0.70 :     10698:       mov    %rdi,%rax
+    2.82 :        1069b:       shr    $0x38,%rax
+         :                  if morsel == decode_tables::INVALID_VALUE {
+         :                      bad_byte_index = input_index;
+         :                      break;
+         :                  };
+         :                  accum = (morsel as u64) << 58;
+ lib.rs:327    4.02 :     1069f:       movzbl (%r9,%rax,1),%r15d
+         :              // fast loop of 8 bytes at a time
+         :              while input_index < length_of_full_chunks {
+         :                  let mut accum: u64;
+         :
+         :                  let input_chunk = BigEndian::read_u64(&input_bytes[input_index..(input_index + 8)]);
+         :                  morsel = decode_table[(input_chunk >> 56) as usize];
+ lib.rs:322    3.68 :     106a4:       cmp    $0xff,%r15
+         :                  if morsel == decode_tables::INVALID_VALUE {
+    0.00 :        106ab:       je     1090e <base64::decode_config_buf::hbf68a45fefa299c1+0x46e>
+```
+
+
+Fuzzing
+---
+
+This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts. To run, use an invocation like these:
+
+```bash
+cargo +nightly fuzz run roundtrip
+cargo +nightly fuzz run roundtrip_no_pad
+cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240
+cargo +nightly fuzz run decode_random
+```
+
+
+License
+---
+
+This project is dual-licensed under MIT and Apache 2.0.

+ 105 - 0
zeroidc/vendor/base64-0.13.0/RELEASE-NOTES.md

@@ -0,0 +1,105 @@
+# 0.13.0
+
+- Config methods are const
+- Added `EncoderStringWriter` to allow encoding directly to a String
+- `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work)
+    - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value.
+- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be emitted instead of `InvalidLength` to make the problem more obvious.
+
+# 0.12.2
+
+- Add `BinHex` alphabet
+
+# 0.12.1
+
+- Add `Bcrypt` alphabet
+
+# 0.12.0
+
+- A `Read` implementation (`DecoderReader`) to let users transparently decode data from a b64 input source
+- IMAP's modified b64 alphabet
+- Relaxed type restrictions to just `AsRef<[u8]>` for main `encode*`/`decode*` functions
+- A minor performance improvement in encoding
+
+# 0.11.0
+- Minimum rust version 1.34.0
+- `no_std` is now supported via the two new features `alloc` and `std`.
+
+# 0.10.1
+
+- Minimum rust version 1.27.2
+- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs on `EncoderWriter::write`.
+- Make it configurable whether or not to return an error when decoding detects excess trailing bits.
+
+# 0.10.0
+
+- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming encoding, etc) either couldn't support it or could support only special cases of it with a great increase in complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's still available if you need it.
+  - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for common
+  configs that `unwrap()` for you are no longer needed
+- Add a streaming encoder `Write` impl to transparently base64 as you write.
+- Remove the remaining `unsafe` code.
+- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b))`.
+- Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them.
+
+# 0.9.3
+
+- Update safemem
+
+# 0.9.2
+
+- Derive `Clone` for `DecodeError`.
+
+# 0.9.1
+
+- Add support for `crypt(3)`'s base64 variant.
+
+# 0.9.0
+
+- `decode_config_slice` function for no-allocation decoding, analogous to `encode_config_slice`
+- Decode performance optimization
+
+# 0.8.0
+
+- `encode_config_slice` function for no-allocation encoding
+
+# 0.7.0
+
+- `STANDARD_NO_PAD` config
+- `Base64Display` heap-free wrapper for use in format strings, etc
+
+# 0.6.0
+
+- Decode performance improvements
+- Use `unsafe` in fewer places
+- Added fuzzers
+
+# 0.5.2
+
+- Avoid usize overflow when calculating length
+- Better line wrapping performance
+
+# 0.5.1
+
+- Temporarily disable line wrapping
+- Add Apache 2.0 license
+
+# 0.5.0
+
+- MIME support, including configurable line endings and line wrapping
+- Removed `decode_ws`
+- Renamed `Base64Error` to `DecodeError`
+
+# 0.4.1
+
+- Allow decoding a `AsRef<[u8]>` instead of just a `&str`
+
+# 0.4.0
+
+- Configurable padding
+- Encode performance improvements
+
+# 0.3.0
+
+- Added encode/decode functions that do not allocate their own storage
+- Decode performance improvements
+- Extraneous padding bytes are no longer ignored. Now, an error will be returned.

+ 210 - 0
zeroidc/vendor/base64-0.13.0/benches/benchmarks.rs

@@ -0,0 +1,210 @@
+extern crate base64;
+#[macro_use]
+extern crate criterion;
+extern crate rand;
+
+use base64::display;
+use base64::{
+    decode, decode_config_buf, decode_config_slice, encode, encode_config_buf, encode_config_slice,
+    write, Config,
+};
+
+use criterion::{black_box, Bencher, Criterion, ParameterizedBenchmark, Throughput};
+use rand::{FromEntropy, Rng};
+use std::io::{self, Read, Write};
+
+const TEST_CONFIG: Config = base64::STANDARD;
+
+fn do_decode_bench(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
+    fill(&mut v);
+    let encoded = encode(&v);
+
+    b.iter(|| {
+        let orig = decode(&encoded);
+        black_box(&orig);
+    });
+}
+
+fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
+    fill(&mut v);
+    let encoded = encode(&v);
+
+    let mut buf = Vec::new();
+    b.iter(|| {
+        decode_config_buf(&encoded, TEST_CONFIG, &mut buf).unwrap();
+        black_box(&buf);
+        buf.clear();
+    });
+}
+
+fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
+    fill(&mut v);
+    let encoded = encode(&v);
+
+    let mut buf = Vec::new();
+    buf.resize(size, 0);
+    b.iter(|| {
+        decode_config_slice(&encoded, TEST_CONFIG, &mut buf).unwrap();
+        black_box(&buf);
+    });
+}
+
+fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
+    fill(&mut v);
+    let encoded = encode(&v);
+
+    let mut buf = Vec::new();
+    buf.resize(size, 0);
+    buf.truncate(0);
+
+    b.iter(|| {
+        let mut cursor = io::Cursor::new(&encoded[..]);
+        let mut decoder = base64::read::DecoderReader::new(&mut cursor, TEST_CONFIG);
+        decoder.read_to_end(&mut buf).unwrap();
+        buf.clear();
+        black_box(&buf);
+    });
+}
+
+fn do_encode_bench(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+    b.iter(|| {
+        let e = encode(&v);
+        black_box(&e);
+    });
+}
+
+fn do_encode_bench_display(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+    b.iter(|| {
+        let e = format!("{}", display::Base64Display::with_config(&v, TEST_CONFIG));
+        black_box(&e);
+    });
+}
+
+fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+    let mut buf = String::new();
+    b.iter(|| {
+        encode_config_buf(&v, TEST_CONFIG, &mut buf);
+        buf.clear();
+    });
+}
+
+fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+    let mut buf = Vec::new();
+    // conservative estimate of encoded size
+    buf.resize(v.len() * 2, 0);
+    b.iter(|| {
+        encode_config_slice(&v, TEST_CONFIG, &mut buf);
+    });
+}
+
+fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+    let mut buf = Vec::new();
+
+    buf.reserve(size * 2);
+    b.iter(|| {
+        buf.clear();
+        let mut stream_enc = write::EncoderWriter::new(&mut buf, TEST_CONFIG);
+        stream_enc.write_all(&v).unwrap();
+        stream_enc.flush().unwrap();
+    });
+}
+
+fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+
+    b.iter(|| {
+        let mut stream_enc = write::EncoderStringWriter::new(TEST_CONFIG);
+        stream_enc.write_all(&v).unwrap();
+        stream_enc.flush().unwrap();
+        let _ = stream_enc.into_inner();
+    });
+}
+
+fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) {
+    let mut v: Vec<u8> = Vec::with_capacity(size);
+    fill(&mut v);
+
+    let mut buf = String::new();
+    b.iter(|| {
+        buf.clear();
+        let mut stream_enc = write::EncoderStringWriter::from(&mut buf, TEST_CONFIG);
+        stream_enc.write_all(&v).unwrap();
+        stream_enc.flush().unwrap();
+        let _ = stream_enc.into_inner();
+    });
+}
+
+fn fill(v: &mut Vec<u8>) {
+    let cap = v.capacity();
+    // weak randomness is plenty; we just want to not be completely friendly to the branch predictor
+    let mut r = rand::rngs::SmallRng::from_entropy();
+    while v.len() < cap {
+        v.push(r.gen::<u8>());
+    }
+}
+
+const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024];
+
+// Benchmarks over these byte sizes take longer so we will run fewer samples to
+// keep the benchmark runtime reasonable.
+const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024];
+
+fn encode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark<usize> {
+    ParameterizedBenchmark::new("encode", do_encode_bench, byte_sizes.iter().cloned())
+        .warm_up_time(std::time::Duration::from_millis(500))
+        .measurement_time(std::time::Duration::from_secs(3))
+        .throughput(|s| Throughput::Bytes(*s as u64))
+        .with_function("encode_display", do_encode_bench_display)
+        .with_function("encode_reuse_buf", do_encode_bench_reuse_buf)
+        .with_function("encode_slice", do_encode_bench_slice)
+        .with_function("encode_reuse_buf_stream", do_encode_bench_stream)
+        .with_function("encode_string_stream", do_encode_bench_string_stream)
+        .with_function(
+            "encode_string_reuse_buf_stream",
+            do_encode_bench_string_reuse_buf_stream,
+        )
+}
+
+fn decode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark<usize> {
+    ParameterizedBenchmark::new("decode", do_decode_bench, byte_sizes.iter().cloned())
+        .warm_up_time(std::time::Duration::from_millis(500))
+        .measurement_time(std::time::Duration::from_secs(3))
+        .throughput(|s| Throughput::Bytes(*s as u64))
+        .with_function("decode_reuse_buf", do_decode_bench_reuse_buf)
+        .with_function("decode_slice", do_decode_bench_slice)
+        .with_function("decode_stream", do_decode_bench_stream)
+}
+
+fn bench(c: &mut Criterion) {
+    c.bench("bench_small_input", encode_benchmarks(&BYTE_SIZES[..]));
+
+    c.bench(
+        "bench_large_input",
+        encode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10),
+    );
+
+    c.bench("bench_small_input", decode_benchmarks(&BYTE_SIZES[..]));
+
+    c.bench(
+        "bench_large_input",
+        decode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10),
+    );
+}
+
+criterion_group!(benches, bench);
+criterion_main!(benches);

+ 89 - 0
zeroidc/vendor/base64-0.13.0/examples/base64.rs

@@ -0,0 +1,89 @@
+use std::fs::File;
+use std::io::{self, Read};
+use std::path::PathBuf;
+use std::process;
+use std::str::FromStr;
+
+use base64::{read, write};
+use structopt::StructOpt;
+
+#[derive(Debug, StructOpt)]
+enum CharacterSet {
+    Standard,
+    UrlSafe,
+}
+
+impl Default for CharacterSet {
+    fn default() -> Self {
+        CharacterSet::Standard
+    }
+}
+
+impl Into<base64::Config> for CharacterSet {
+    fn into(self) -> base64::Config {
+        match self {
+            CharacterSet::Standard => base64::STANDARD,
+            CharacterSet::UrlSafe => base64::URL_SAFE,
+        }
+    }
+}
+
+impl FromStr for CharacterSet {
+    type Err = String;
+    fn from_str(s: &str) -> Result<CharacterSet, String> {
+        match s {
+            "standard" => Ok(CharacterSet::Standard),
+            "urlsafe" => Ok(CharacterSet::UrlSafe),
+            _ => Err(format!("charset '{}' unrecognized", s)),
+        }
+    }
+}
+
+/// Base64 encode or decode FILE (or standard input), to standard output.
+#[derive(Debug, StructOpt)]
+struct Opt {
+    /// decode data
+    #[structopt(short = "d", long = "decode")]
+    decode: bool,
+    /// The character set to choose. Defaults to the standard base64 character set.
+    /// Supported character sets include "standard" and "urlsafe".
+    #[structopt(long = "charset")]
+    charset: Option<CharacterSet>,
+    /// The file to encode/decode.
+    #[structopt(parse(from_os_str))]
+    file: Option<PathBuf>,
+}
+
+fn main() {
+    let opt = Opt::from_args();
+    let stdin;
+    let mut input: Box<dyn Read> = match opt.file {
+        None => {
+            stdin = io::stdin();
+            Box::new(stdin.lock())
+        }
+        Some(ref f) if f.as_os_str() == "-" => {
+            stdin = io::stdin();
+            Box::new(stdin.lock())
+        }
+        Some(f) => Box::new(File::open(f).unwrap()),
+    };
+    let config = opt.charset.unwrap_or_default().into();
+    let stdout = io::stdout();
+    let mut stdout = stdout.lock();
+    let r = if opt.decode {
+        let mut decoder = read::DecoderReader::new(&mut input, config);
+        io::copy(&mut decoder, &mut stdout)
+    } else {
+        let mut encoder = write::EncoderWriter::new(&mut stdout, config);
+        io::copy(&mut input, &mut encoder)
+    };
+    if let Err(e) = r {
+        eprintln!(
+            "Base64 {} failed with {}",
+            if opt.decode { "decode" } else { "encode" },
+            e
+        );
+        process::exit(1);
+    }
+}

+ 0 - 0
zeroidc/vendor/base64/examples/make_tables.rs → zeroidc/vendor/base64-0.13.0/examples/make_tables.rs


+ 34 - 0
zeroidc/vendor/base64-0.13.0/icon_CLion.svg

@@ -0,0 +1,34 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 128 128">
+  <defs>
+    <linearGradient id="linear-gradient" x1="40.69" y1="-676.56" x2="83.48" y2="-676.56" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
+      <stop offset="0" stop-color="#ed358c"/>
+      <stop offset="0.16" stop-color="#e9388c"/>
+      <stop offset="0.3" stop-color="#de418c"/>
+      <stop offset="0.43" stop-color="#cc508c"/>
+      <stop offset="0.57" stop-color="#b2658d"/>
+      <stop offset="0.7" stop-color="#90808d"/>
+      <stop offset="0.83" stop-color="#67a18e"/>
+      <stop offset="0.95" stop-color="#37c78f"/>
+      <stop offset="1" stop-color="#22d88f"/>
+    </linearGradient>
+    <linearGradient id="linear-gradient-2" x1="32.58" y1="-665.27" x2="13.76" y2="-791.59" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
+      <stop offset="0.09" stop-color="#22d88f"/>
+      <stop offset="0.9" stop-color="#029de0"/>
+    </linearGradient>
+    <linearGradient id="linear-gradient-3" x1="116.68" y1="-660.66" x2="-12.09" y2="-796.66" xlink:href="#linear-gradient-2"/>
+    <linearGradient id="linear-gradient-4" x1="73.35" y1="-739.1" x2="122.29" y2="-746.06" xlink:href="#linear-gradient-2"/>
+  </defs>
+  <title>icon_CLion</title>
+  <g>
+    <polygon points="49.2 51.8 40.6 55.4 48.4 0 77.8 16.2 49.2 51.8" fill="url(#linear-gradient)"/>
+    <polygon points="44.6 76.8 48.8 0 11.8 23.2 0 94 44.6 76.8" fill="url(#linear-gradient-2)"/>
+    <polygon points="125.4 38.4 109 4.8 77.8 16.2 55 41.4 0 94 41.6 124.4 93.6 77.2 125.4 38.4" fill="url(#linear-gradient-3)"/>
+    <polygon points="53.8 54.6 46.6 98.4 75.8 121 107.8 128 128 82.4 53.8 54.6" fill="url(#linear-gradient-4)"/>
+  </g>
+  <g>
+    <rect x="24" y="24" width="80" height="80"/>
+    <rect x="31.6" y="89" width="30" height="5" fill="#fff"/>
+    <path d="M31,51.2h0A16.83,16.83,0,0,1,48.2,34c6.2,0,10,2,13,5.2l-4.6,5.4c-2.6-2.4-5.2-3.8-8.4-3.8-5.6,0-9.6,4.6-9.6,10.4h0c0,5.6,4,10.4,9.6,10.4,3.8,0,6.2-1.6,8.8-3.8l4.6,4.6c-3.4,3.6-7.2,6-13.6,6A17,17,0,0,1,31,51.2" fill="#fff"/>
+    <path d="M66.6,34.4H74v27H88.4v6.2H66.6V34.4Z" fill="#fff"/>
+  </g>
+</svg>

+ 247 - 0
zeroidc/vendor/base64-0.13.0/src/chunked_encoder.rs

@@ -0,0 +1,247 @@
+use crate::{
+    encode::{add_padding, encode_to_slice},
+    Config,
+};
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use alloc::string::String;
+use core::cmp;
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use core::str;
+
+/// The output mechanism for ChunkedEncoder's encoded bytes.
+pub trait Sink {
+    type Error;
+
+    /// Handle a chunk of encoded base64 data (as UTF-8 bytes)
+    fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>;
+}
+
+const BUF_SIZE: usize = 1024;
+
+/// A base64 encoder that emits encoded bytes in chunks without heap allocation.
+pub struct ChunkedEncoder {
+    config: Config,
+    max_input_chunk_len: usize,
+}
+
+impl ChunkedEncoder {
+    pub fn new(config: Config) -> ChunkedEncoder {
+        ChunkedEncoder {
+            config,
+            max_input_chunk_len: max_input_length(BUF_SIZE, config),
+        }
+    }
+
+    pub fn encode<S: Sink>(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> {
+        let mut encode_buf: [u8; BUF_SIZE] = [0; BUF_SIZE];
+        let encode_table = self.config.char_set.encode_table();
+
+        let mut input_index = 0;
+
+        while input_index < bytes.len() {
+            // either the full input chunk size, or it's the last iteration
+            let input_chunk_len = cmp::min(self.max_input_chunk_len, bytes.len() - input_index);
+
+            let chunk = &bytes[input_index..(input_index + input_chunk_len)];
+
+            let mut b64_bytes_written = encode_to_slice(chunk, &mut encode_buf, encode_table);
+
+            input_index += input_chunk_len;
+            let more_input_left = input_index < bytes.len();
+
+            if self.config.pad && !more_input_left {
+                // no more input, add padding if needed. Buffer will have room because
+                // max_input_length leaves room for it.
+                b64_bytes_written += add_padding(bytes.len(), &mut encode_buf[b64_bytes_written..]);
+            }
+
+            sink.write_encoded_bytes(&encode_buf[0..b64_bytes_written])?;
+        }
+
+        Ok(())
+    }
+}
+
+/// Calculate the longest input that can be encoded for the given output buffer size.
+///
+/// If the config requires padding, two bytes of buffer space will be set aside so that the last
+/// chunk of input can be encoded safely.
+///
+/// The input length will always be a multiple of 3 so that no encoding state has to be carried over
+/// between chunks.
+fn max_input_length(encoded_buf_len: usize, config: Config) -> usize {
+    let effective_buf_len = if config.pad {
+        // make room for padding
+        encoded_buf_len
+            .checked_sub(2)
+            .expect("Don't use a tiny buffer")
+    } else {
+        encoded_buf_len
+    };
+
+    // No padding, so just normal base64 expansion.
+    (effective_buf_len / 4) * 3
+}
+
+// A really simple sink that just appends to a string
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub(crate) struct StringSink<'a> {
+    string: &'a mut String,
+}
+
+#[cfg(any(feature = "alloc", feature = "std", test))]
+impl<'a> StringSink<'a> {
+    pub(crate) fn new(s: &mut String) -> StringSink {
+        StringSink { string: s }
+    }
+}
+
+#[cfg(any(feature = "alloc", feature = "std", test))]
+impl<'a> Sink for StringSink<'a> {
+    type Error = ();
+
+    fn write_encoded_bytes(&mut self, s: &[u8]) -> Result<(), Self::Error> {
+        self.string.push_str(str::from_utf8(s).unwrap());
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+    use crate::{encode_config_buf, tests::random_config, CharacterSet, STANDARD};
+
+    use rand::{
+        distributions::{Distribution, Uniform},
+        FromEntropy, Rng,
+    };
+
+    #[test]
+    fn chunked_encode_empty() {
+        assert_eq!("", chunked_encode_str(&[], STANDARD));
+    }
+
+    #[test]
+    fn chunked_encode_intermediate_fast_loop() {
+        // > 8 bytes input, will enter the pretty fast loop
+        assert_eq!(
+            "Zm9vYmFyYmF6cXV4",
+            chunked_encode_str(b"foobarbazqux", STANDARD)
+        );
+    }
+
+    #[test]
+    fn chunked_encode_fast_loop() {
+        // > 32 bytes input, will enter the uber fast loop
+        assert_eq!(
+            "Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==",
+            chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", STANDARD)
+        );
+    }
+
+    #[test]
+    fn chunked_encode_slow_loop_only() {
+        // < 8 bytes input, slow loop only
+        assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", STANDARD));
+    }
+
+    #[test]
+    fn chunked_encode_matches_normal_encode_random_string_sink() {
+        let helper = StringSinkTestHelper;
+        chunked_encode_matches_normal_encode_random(&helper);
+    }
+
+    #[test]
+    fn max_input_length_no_pad() {
+        let config = config_with_pad(false);
+        assert_eq!(768, max_input_length(1024, config));
+    }
+
+    #[test]
+    fn max_input_length_with_pad_decrements_one_triple() {
+        let config = config_with_pad(true);
+        assert_eq!(765, max_input_length(1024, config));
+    }
+
+    #[test]
+    fn max_input_length_with_pad_one_byte_short() {
+        let config = config_with_pad(true);
+        assert_eq!(765, max_input_length(1025, config));
+    }
+
+    #[test]
+    fn max_input_length_with_pad_fits_exactly() {
+        let config = config_with_pad(true);
+        assert_eq!(768, max_input_length(1026, config));
+    }
+
+    #[test]
+    fn max_input_length_cant_use_extra_single_encoded_byte() {
+        let config = Config::new(crate::CharacterSet::Standard, false);
+        assert_eq!(300, max_input_length(401, config));
+    }
+
+    pub fn chunked_encode_matches_normal_encode_random<S: SinkTestHelper>(sink_test_helper: &S) {
+        let mut input_buf: Vec<u8> = Vec::new();
+        let mut output_buf = String::new();
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+        let input_len_range = Uniform::new(1, 10_000);
+
+        for _ in 0..5_000 {
+            input_buf.clear();
+            output_buf.clear();
+
+            let buf_len = input_len_range.sample(&mut rng);
+            for _ in 0..buf_len {
+                input_buf.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+
+            let chunk_encoded_string = sink_test_helper.encode_to_string(config, &input_buf);
+            encode_config_buf(&input_buf, config, &mut output_buf);
+
+            assert_eq!(
+                output_buf, chunk_encoded_string,
+                "input len={}, config: pad={}",
+                buf_len, config.pad
+            );
+        }
+    }
+
+    fn chunked_encode_str(bytes: &[u8], config: Config) -> String {
+        let mut s = String::new();
+        {
+            let mut sink = StringSink::new(&mut s);
+            let encoder = ChunkedEncoder::new(config);
+            encoder.encode(bytes, &mut sink).unwrap();
+        }
+
+        return s;
+    }
+
+    fn config_with_pad(pad: bool) -> Config {
+        Config::new(CharacterSet::Standard, pad)
+    }
+
+    // An abstraction around sinks so that we can have tests that easily to any sink implementation
+    pub trait SinkTestHelper {
+        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String;
+    }
+
+    struct StringSinkTestHelper;
+
+    impl SinkTestHelper for StringSinkTestHelper {
+        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String {
+            let encoder = ChunkedEncoder::new(config);
+            let mut s = String::new();
+            {
+                let mut sink = StringSink::new(&mut s);
+                encoder.encode(bytes, &mut sink).unwrap();
+            }
+
+            s
+        }
+    }
+}

+ 873 - 0
zeroidc/vendor/base64-0.13.0/src/decode.rs

@@ -0,0 +1,873 @@
+use crate::{tables, Config, PAD_BYTE};
+
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use crate::STANDARD;
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use alloc::vec::Vec;
+use core::fmt;
+#[cfg(any(feature = "std", test))]
+use std::error;
+
+// decode logic operates on chunks of 8 input bytes without padding
+const INPUT_CHUNK_LEN: usize = 8;
+const DECODED_CHUNK_LEN: usize = 6;
+// we read a u64 and write a u64, but a u64 of input only yields 6 bytes of output, so the last
+// 2 bytes of any output u64 should not be counted as written to (but must be available in a
+// slice).
+const DECODED_CHUNK_SUFFIX: usize = 2;
+
+// how many u64's of input to handle at a time
+const CHUNKS_PER_FAST_LOOP_BLOCK: usize = 4;
+const INPUT_BLOCK_LEN: usize = CHUNKS_PER_FAST_LOOP_BLOCK * INPUT_CHUNK_LEN;
+// includes the trailing 2 bytes for the final u64 write
+const DECODED_BLOCK_LEN: usize =
+    CHUNKS_PER_FAST_LOOP_BLOCK * DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX;
+
+/// Errors that can occur while decoding.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum DecodeError {
+    /// An invalid byte was found in the input. The offset and offending byte are provided.
+    InvalidByte(usize, u8),
+    /// The length of the input is invalid.
+    /// A typical cause of this is stray trailing whitespace or other separator bytes.
+    /// In the case where excess trailing bytes have produced an invalid length *and* the last byte
+    /// is also an invalid base64 symbol (as would be the case for whitespace, etc), `InvalidByte`
+    /// will be emitted instead of `InvalidLength` to make the issue easier to debug.
+    InvalidLength,
+    /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded.
+    /// This is indicative of corrupted or truncated Base64.
+    /// Unlike InvalidByte, which reports symbols that aren't in the alphabet, this error is for
+    /// symbols that are in the alphabet but represent nonsensical encodings.
+    InvalidLastSymbol(usize, u8),
+}
+
+impl fmt::Display for DecodeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            DecodeError::InvalidByte(index, byte) => {
+                write!(f, "Invalid byte {}, offset {}.", byte, index)
+            }
+            DecodeError::InvalidLength => write!(f, "Encoded text cannot have a 6-bit remainder."),
+            DecodeError::InvalidLastSymbol(index, byte) => {
+                write!(f, "Invalid last symbol {}, offset {}.", byte, index)
+            }
+        }
+    }
+}
+
+#[cfg(any(feature = "std", test))]
+impl error::Error for DecodeError {
+    fn description(&self) -> &str {
+        match *self {
+            DecodeError::InvalidByte(_, _) => "invalid byte",
+            DecodeError::InvalidLength => "invalid length",
+            DecodeError::InvalidLastSymbol(_, _) => "invalid last symbol",
+        }
+    }
+
+    fn cause(&self) -> Option<&dyn error::Error> {
+        None
+    }
+}
+
+///Decode from string reference as octets.
+///Returns a Result containing a Vec<u8>.
+///Convenience `decode_config(input, base64::STANDARD);`.
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let bytes = base64::decode("aGVsbG8gd29ybGQ=").unwrap();
+///    println!("{:?}", bytes);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
+    decode_config(input, STANDARD)
+}
+
+///Decode from string reference as octets.
+///Returns a Result containing a Vec<u8>.
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let bytes = base64::decode_config("aGVsbG8gd29ybGR+Cg==", base64::STANDARD).unwrap();
+///    println!("{:?}", bytes);
+///
+///    let bytes_url = base64::decode_config("aGVsbG8gaW50ZXJuZXR-Cg==", base64::URL_SAFE).unwrap();
+///    println!("{:?}", bytes_url);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn decode_config<T: AsRef<[u8]>>(input: T, config: Config) -> Result<Vec<u8>, DecodeError> {
+    let mut buffer = Vec::<u8>::with_capacity(input.as_ref().len() * 4 / 3);
+
+    decode_config_buf(input, config, &mut buffer).map(|_| buffer)
+}
+
+///Decode from string reference as octets.
+///Writes into the supplied buffer to avoid allocation.
+///Returns a Result containing an empty tuple, aka ().
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let mut buffer = Vec::<u8>::new();
+///    base64::decode_config_buf("aGVsbG8gd29ybGR+Cg==", base64::STANDARD, &mut buffer).unwrap();
+///    println!("{:?}", buffer);
+///
+///    buffer.clear();
+///
+///    base64::decode_config_buf("aGVsbG8gaW50ZXJuZXR-Cg==", base64::URL_SAFE, &mut buffer)
+///        .unwrap();
+///    println!("{:?}", buffer);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn decode_config_buf<T: AsRef<[u8]>>(
+    input: T,
+    config: Config,
+    buffer: &mut Vec<u8>,
+) -> Result<(), DecodeError> {
+    let input_bytes = input.as_ref();
+
+    let starting_output_len = buffer.len();
+
+    let num_chunks = num_chunks(input_bytes);
+    let decoded_len_estimate = num_chunks
+        .checked_mul(DECODED_CHUNK_LEN)
+        .and_then(|p| p.checked_add(starting_output_len))
+        .expect("Overflow when calculating output buffer length");
+    buffer.resize(decoded_len_estimate, 0);
+
+    let bytes_written;
+    {
+        let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
+        bytes_written = decode_helper(input_bytes, num_chunks, config, buffer_slice)?;
+    }
+
+    buffer.truncate(starting_output_len + bytes_written);
+
+    Ok(())
+}
+
+/// Decode the input into the provided output slice.
+///
+/// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
+///
+/// If you don't know ahead of time what the decoded length should be, size your buffer with a
+/// conservative estimate for the decoded length of an input: 3 bytes of output for every 4 bytes of
+/// input, rounded up, or in other words `(input_len + 3) / 4 * 3`.
+///
+/// If the slice is not large enough, this will panic.
+pub fn decode_config_slice<T: AsRef<[u8]>>(
+    input: T,
+    config: Config,
+    output: &mut [u8],
+) -> Result<usize, DecodeError> {
+    let input_bytes = input.as_ref();
+
+    decode_helper(input_bytes, num_chunks(input_bytes), config, output)
+}
+
+/// Return the number of input chunks (including a possibly partial final chunk) in the input
+fn num_chunks(input: &[u8]) -> usize {
+    input
+        .len()
+        .checked_add(INPUT_CHUNK_LEN - 1)
+        .expect("Overflow when calculating number of chunks in input")
+        / INPUT_CHUNK_LEN
+}
+
+/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
+/// Returns the number of bytes written, or an error.
+// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is
+// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment,
+// but this is fragile and the best setting changes with only minor code modifications.
+#[inline]
+fn decode_helper(
+    input: &[u8],
+    num_chunks: usize,
+    config: Config,
+    output: &mut [u8],
+) -> Result<usize, DecodeError> {
+    let char_set = config.char_set;
+    let decode_table = char_set.decode_table();
+
+    let remainder_len = input.len() % INPUT_CHUNK_LEN;
+
+    // Because the fast decode loop writes in groups of 8 bytes (unrolled to
+    // CHUNKS_PER_FAST_LOOP_BLOCK times 8 bytes, where possible) and outputs 8 bytes at a time (of
+    // which only 6 are valid data), we need to be sure that we stop using the fast decode loop
+    // soon enough that there will always be 2 more bytes of valid data written after that loop.
+    let trailing_bytes_to_skip = match remainder_len {
+        // if input is a multiple of the chunk size, ignore the last chunk as it may have padding,
+        // and the fast decode logic cannot handle padding
+        0 => INPUT_CHUNK_LEN,
+        // 1 and 5 trailing bytes are illegal: can't decode 6 bits of input into a byte
+        1 | 5 => {
+            // trailing whitespace is so common that it's worth it to check the last byte to
+            // possibly return a better error message
+            if let Some(b) = input.last() {
+                if *b != PAD_BYTE && decode_table[*b as usize] == tables::INVALID_VALUE {
+                    return Err(DecodeError::InvalidByte(input.len() - 1, *b));
+                }
+            }
+
+            return Err(DecodeError::InvalidLength);
+        }
+        // This will decode to one output byte, which isn't enough to overwrite the 2 extra bytes
+        // written by the fast decode loop. So, we have to ignore both these 2 bytes and the
+        // previous chunk.
+        2 => INPUT_CHUNK_LEN + 2,
+        // If this is 3 unpadded chars, then it would actually decode to 2 bytes. However, if this
+        // is an erroneous 2 chars + 1 pad char that would decode to 1 byte, then it should fail
+        // with an error, not panic from going past the bounds of the output slice, so we let it
+        // use stage 3 + 4.
+        3 => INPUT_CHUNK_LEN + 3,
+        // This can also decode to one output byte because it may be 2 input chars + 2 padding
+        // chars, which would decode to 1 byte.
+        4 => INPUT_CHUNK_LEN + 4,
+        // Everything else is a legal decode len (given that we don't require padding), and will
+        // decode to at least 2 bytes of output.
+        _ => remainder_len,
+    };
+
+    // rounded up to include partial chunks
+    let mut remaining_chunks = num_chunks;
+
+    let mut input_index = 0;
+    let mut output_index = 0;
+
+    {
+        let length_of_fast_decode_chunks = input.len().saturating_sub(trailing_bytes_to_skip);
+
+        // Fast loop, stage 1
+        // manual unroll to CHUNKS_PER_FAST_LOOP_BLOCK of u64s to amortize slice bounds checks
+        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_BLOCK_LEN) {
+            while input_index <= max_start_index {
+                let input_slice = &input[input_index..(input_index + INPUT_BLOCK_LEN)];
+                let output_slice = &mut output[output_index..(output_index + DECODED_BLOCK_LEN)];
+
+                decode_chunk(
+                    &input_slice[0..],
+                    input_index,
+                    decode_table,
+                    &mut output_slice[0..],
+                )?;
+                decode_chunk(
+                    &input_slice[8..],
+                    input_index + 8,
+                    decode_table,
+                    &mut output_slice[6..],
+                )?;
+                decode_chunk(
+                    &input_slice[16..],
+                    input_index + 16,
+                    decode_table,
+                    &mut output_slice[12..],
+                )?;
+                decode_chunk(
+                    &input_slice[24..],
+                    input_index + 24,
+                    decode_table,
+                    &mut output_slice[18..],
+                )?;
+
+                input_index += INPUT_BLOCK_LEN;
+                output_index += DECODED_BLOCK_LEN - DECODED_CHUNK_SUFFIX;
+                remaining_chunks -= CHUNKS_PER_FAST_LOOP_BLOCK;
+            }
+        }
+
+        // Fast loop, stage 2 (aka still pretty fast loop)
+        // 8 bytes at a time for whatever we didn't do in stage 1.
+        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_CHUNK_LEN) {
+            while input_index < max_start_index {
+                decode_chunk(
+                    &input[input_index..(input_index + INPUT_CHUNK_LEN)],
+                    input_index,
+                    decode_table,
+                    &mut output
+                        [output_index..(output_index + DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX)],
+                )?;
+
+                output_index += DECODED_CHUNK_LEN;
+                input_index += INPUT_CHUNK_LEN;
+                remaining_chunks -= 1;
+            }
+        }
+    }
+
+    // Stage 3
+    // If input length was such that a chunk had to be deferred until after the fast loop
+    // because decoding it would have produced 2 trailing bytes that wouldn't then be
+    // overwritten, we decode that chunk here. This way is slower but doesn't write the 2
+    // trailing bytes.
+    // However, we still need to avoid the last chunk (partial or complete) because it could
+    // have padding, so we always do 1 fewer to avoid the last chunk.
+    for _ in 1..remaining_chunks {
+        decode_chunk_precise(
+            &input[input_index..],
+            input_index,
+            decode_table,
+            &mut output[output_index..(output_index + DECODED_CHUNK_LEN)],
+        )?;
+
+        input_index += INPUT_CHUNK_LEN;
+        output_index += DECODED_CHUNK_LEN;
+    }
+
+    // always have one more (possibly partial) block of 8 input
+    debug_assert!(input.len() - input_index > 1 || input.is_empty());
+    debug_assert!(input.len() - input_index <= 8);
+
+    // Stage 4
+    // Finally, decode any leftovers that aren't a complete input block of 8 bytes.
+    // Use a u64 as a stack-resident 8 byte buffer.
+    let mut leftover_bits: u64 = 0;
+    let mut morsels_in_leftover = 0;
+    let mut padding_bytes = 0;
+    let mut first_padding_index: usize = 0;
+    let mut last_symbol = 0_u8;
+    let start_of_leftovers = input_index;
+    for (i, b) in input[start_of_leftovers..].iter().enumerate() {
+        // '=' padding
+        if *b == PAD_BYTE {
+            // There can be bad padding in a few ways:
+            // 1 - Padding with non-padding characters after it
+            // 2 - Padding after zero or one non-padding characters before it
+            //     in the current quad.
+            // 3 - More than two characters of padding. If 3 or 4 padding chars
+            //     are in the same quad, that implies it will be caught by #2.
+            //     If it spreads from one quad to another, it will be caught by
+            //     #2 in the second quad.
+
+            if i % 4 < 2 {
+                // Check for case #2.
+                let bad_padding_index = start_of_leftovers
+                    + if padding_bytes > 0 {
+                        // If we've already seen padding, report the first padding index.
+                        // This is to be consistent with the faster logic above: it will report an
+                        // error on the first padding character (since it doesn't expect to see
+                        // anything but actual encoded data).
+                        first_padding_index
+                    } else {
+                        // haven't seen padding before, just use where we are now
+                        i
+                    };
+                return Err(DecodeError::InvalidByte(bad_padding_index, *b));
+            }
+
+            if padding_bytes == 0 {
+                first_padding_index = i;
+            }
+
+            padding_bytes += 1;
+            continue;
+        }
+
+        // Check for case #1.
+        // To make '=' handling consistent with the main loop, don't allow
+        // non-suffix '=' in trailing chunk either. Report error as first
+        // erroneous padding.
+        if padding_bytes > 0 {
+            return Err(DecodeError::InvalidByte(
+                start_of_leftovers + first_padding_index,
+                PAD_BYTE,
+            ));
+        }
+        last_symbol = *b;
+
+        // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding.
+        // To minimize shifts, pack the leftovers from left to right.
+        let shift = 64 - (morsels_in_leftover + 1) * 6;
+        // tables are all 256 elements, lookup with a u8 index always succeeds
+        let morsel = decode_table[*b as usize];
+        if morsel == tables::INVALID_VALUE {
+            return Err(DecodeError::InvalidByte(start_of_leftovers + i, *b));
+        }
+
+        leftover_bits |= (morsel as u64) << shift;
+        morsels_in_leftover += 1;
+    }
+
+    let leftover_bits_ready_to_append = match morsels_in_leftover {
+        0 => 0,
+        2 => 8,
+        3 => 16,
+        4 => 24,
+        6 => 32,
+        7 => 40,
+        8 => 48,
+        _ => unreachable!(
+            "Impossible: must only have 0 to 8 input bytes in last chunk, with no invalid lengths"
+        ),
+    };
+
+    // if there are bits set outside the bits we care about, last symbol encodes trailing bits that
+    // will not be included in the output
+    let mask = !0 >> leftover_bits_ready_to_append;
+    if !config.decode_allow_trailing_bits && (leftover_bits & mask) != 0 {
+        // last morsel is at `morsels_in_leftover` - 1
+        return Err(DecodeError::InvalidLastSymbol(
+            start_of_leftovers + morsels_in_leftover - 1,
+            last_symbol,
+        ));
+    }
+
+    let mut leftover_bits_appended_to_buf = 0;
+    while leftover_bits_appended_to_buf < leftover_bits_ready_to_append {
+        // `as` simply truncates the higher bits, which is what we want here
+        let selected_bits = (leftover_bits >> (56 - leftover_bits_appended_to_buf)) as u8;
+        output[output_index] = selected_bits;
+        output_index += 1;
+
+        leftover_bits_appended_to_buf += 8;
+    }
+
+    Ok(output_index)
+}
+
+#[inline]
+fn write_u64(output: &mut [u8], value: u64) {
+    output[..8].copy_from_slice(&value.to_be_bytes());
+}
+
+/// Decode 8 bytes of input into 6 bytes of output. 8 bytes of output will be written, but only the
+/// first 6 of those contain meaningful data.
+///
+/// `input` is the bytes to decode, of which the first 8 bytes will be processed.
+/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors
+/// accurately)
+/// `decode_table` is the lookup table for the particular base64 alphabet.
+/// `output` will have its first 8 bytes overwritten, of which only the first 6 are valid decoded
+/// data.
+// yes, really inline (worth 30-50% speedup)
+#[inline(always)]
+fn decode_chunk(
+    input: &[u8],
+    index_at_start_of_input: usize,
+    decode_table: &[u8; 256],
+    output: &mut [u8],
+) -> Result<(), DecodeError> {
+    let mut accum: u64;
+
+    let morsel = decode_table[input[0] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
+    }
+    accum = (morsel as u64) << 58;
+
+    let morsel = decode_table[input[1] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 1,
+            input[1],
+        ));
+    }
+    accum |= (morsel as u64) << 52;
+
+    let morsel = decode_table[input[2] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 2,
+            input[2],
+        ));
+    }
+    accum |= (morsel as u64) << 46;
+
+    let morsel = decode_table[input[3] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 3,
+            input[3],
+        ));
+    }
+    accum |= (morsel as u64) << 40;
+
+    let morsel = decode_table[input[4] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 4,
+            input[4],
+        ));
+    }
+    accum |= (morsel as u64) << 34;
+
+    let morsel = decode_table[input[5] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 5,
+            input[5],
+        ));
+    }
+    accum |= (morsel as u64) << 28;
+
+    let morsel = decode_table[input[6] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 6,
+            input[6],
+        ));
+    }
+    accum |= (morsel as u64) << 22;
+
+    let morsel = decode_table[input[7] as usize];
+    if morsel == tables::INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 7,
+            input[7],
+        ));
+    }
+    accum |= (morsel as u64) << 16;
+
+    write_u64(output, accum);
+
+    Ok(())
+}
+
+/// Decode an 8-byte chunk, but only write the 6 bytes actually decoded instead of including 2
+/// trailing garbage bytes.
+#[inline]
+fn decode_chunk_precise(
+    input: &[u8],
+    index_at_start_of_input: usize,
+    decode_table: &[u8; 256],
+    output: &mut [u8],
+) -> Result<(), DecodeError> {
+    let mut tmp_buf = [0_u8; 8];
+
+    decode_chunk(
+        input,
+        index_at_start_of_input,
+        decode_table,
+        &mut tmp_buf[..],
+    )?;
+
+    output[0..6].copy_from_slice(&tmp_buf[0..6]);
+
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        encode::encode_config_buf,
+        encode::encode_config_slice,
+        tests::{assert_encode_sanity, random_config},
+    };
+
+    use rand::{
+        distributions::{Distribution, Uniform},
+        FromEntropy, Rng,
+    };
+
+    #[test]
+    fn decode_chunk_precise_writes_only_6_bytes() {
+        let input = b"Zm9vYmFy"; // "foobar"
+        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
+        decode_chunk_precise(&input[..], 0, tables::STANDARD_DECODE, &mut output).unwrap();
+        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output);
+    }
+
+    #[test]
+    fn decode_chunk_writes_8_bytes() {
+        let input = b"Zm9vYmFy"; // "foobar"
+        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
+        decode_chunk(&input[..], 0, tables::STANDARD_DECODE, &mut output).unwrap();
+        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 0, 0], &output);
+    }
+
+    #[test]
+    fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() {
+        let mut orig_data = Vec::new();
+        let mut encoded_data = String::new();
+        let mut decoded_with_prefix = Vec::new();
+        let mut decoded_without_prefix = Vec::new();
+        let mut prefix = Vec::new();
+
+        let prefix_len_range = Uniform::new(0, 1000);
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            encoded_data.clear();
+            decoded_with_prefix.clear();
+            decoded_without_prefix.clear();
+            prefix.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+            encode_config_buf(&orig_data, config, &mut encoded_data);
+            assert_encode_sanity(&encoded_data, config, input_len);
+
+            let prefix_len = prefix_len_range.sample(&mut rng);
+
+            // fill the buf with a prefix
+            for _ in 0..prefix_len {
+                prefix.push(rng.gen());
+            }
+
+            decoded_with_prefix.resize(prefix_len, 0);
+            decoded_with_prefix.copy_from_slice(&prefix);
+
+            // decode into the non-empty buf
+            decode_config_buf(&encoded_data, config, &mut decoded_with_prefix).unwrap();
+            // also decode into the empty buf
+            decode_config_buf(&encoded_data, config, &mut decoded_without_prefix).unwrap();
+
+            assert_eq!(
+                prefix_len + decoded_without_prefix.len(),
+                decoded_with_prefix.len()
+            );
+            assert_eq!(orig_data, decoded_without_prefix);
+
+            // append plain decode onto prefix
+            prefix.append(&mut decoded_without_prefix);
+
+            assert_eq!(prefix, decoded_with_prefix);
+        }
+    }
+
+    #[test]
+    fn decode_into_slice_doesnt_clobber_existing_prefix_or_suffix() {
+        let mut orig_data = Vec::new();
+        let mut encoded_data = String::new();
+        let mut decode_buf = Vec::new();
+        let mut decode_buf_copy: Vec<u8> = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            encoded_data.clear();
+            decode_buf.clear();
+            decode_buf_copy.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+            encode_config_buf(&orig_data, config, &mut encoded_data);
+            assert_encode_sanity(&encoded_data, config, input_len);
+
+            // fill the buffer with random garbage, long enough to have some room before and after
+            for _ in 0..5000 {
+                decode_buf.push(rng.gen());
+            }
+
+            // keep a copy for later comparison
+            decode_buf_copy.extend(decode_buf.iter());
+
+            let offset = 1000;
+
+            // decode into the non-empty buf
+            let decode_bytes_written =
+                decode_config_slice(&encoded_data, config, &mut decode_buf[offset..]).unwrap();
+
+            assert_eq!(orig_data.len(), decode_bytes_written);
+            assert_eq!(
+                orig_data,
+                &decode_buf[offset..(offset + decode_bytes_written)]
+            );
+            assert_eq!(&decode_buf_copy[0..offset], &decode_buf[0..offset]);
+            assert_eq!(
+                &decode_buf_copy[offset + decode_bytes_written..],
+                &decode_buf[offset + decode_bytes_written..]
+            );
+        }
+    }
+
+    #[test]
+    fn decode_into_slice_fits_in_precisely_sized_slice() {
+        let mut orig_data = Vec::new();
+        let mut encoded_data = String::new();
+        let mut decode_buf = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            encoded_data.clear();
+            decode_buf.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+            encode_config_buf(&orig_data, config, &mut encoded_data);
+            assert_encode_sanity(&encoded_data, config, input_len);
+
+            decode_buf.resize(input_len, 0);
+
+            // decode into the non-empty buf
+            let decode_bytes_written =
+                decode_config_slice(&encoded_data, config, &mut decode_buf[..]).unwrap();
+
+            assert_eq!(orig_data.len(), decode_bytes_written);
+            assert_eq!(orig_data, decode_buf);
+        }
+    }
+
+    #[test]
+    fn detect_invalid_last_symbol_two_bytes() {
+        let decode =
+            |input, forgiving| decode_config(input, STANDARD.decode_allow_trailing_bits(forgiving));
+
+        // example from https://github.com/marshallpierce/rust-base64/issues/75
+        assert!(decode("iYU=", false).is_ok());
+        // trailing 01
+        assert_eq!(
+            Err(DecodeError::InvalidLastSymbol(2, b'V')),
+            decode("iYV=", false)
+        );
+        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
+        // trailing 10
+        assert_eq!(
+            Err(DecodeError::InvalidLastSymbol(2, b'W')),
+            decode("iYW=", false)
+        );
+        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
+        // trailing 11
+        assert_eq!(
+            Err(DecodeError::InvalidLastSymbol(2, b'X')),
+            decode("iYX=", false)
+        );
+        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
+
+        // also works when there are 2 quads in the last block
+        assert_eq!(
+            Err(DecodeError::InvalidLastSymbol(6, b'X')),
+            decode("AAAAiYX=", false)
+        );
+        assert_eq!(Ok(vec![0, 0, 0, 137, 133]), decode("AAAAiYX=", true));
+    }
+
+    #[test]
+    fn detect_invalid_last_symbol_one_byte() {
+        // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol
+
+        assert!(decode("/w==").is_ok());
+        // trailing 01
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'x')), decode("/x=="));
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'z')), decode("/z=="));
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'0')), decode("/0=="));
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'9')), decode("/9=="));
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'+')), decode("/+=="));
+        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'/')), decode("//=="));
+
+        // also works when there are 2 quads in the last block
+        assert_eq!(
+            Err(DecodeError::InvalidLastSymbol(5, b'x')),
+            decode("AAAA/x==")
+        );
+    }
+
+    #[test]
+    fn detect_invalid_last_symbol_every_possible_three_symbols() {
+        let mut base64_to_bytes = ::std::collections::HashMap::new();
+
+        let mut bytes = [0_u8; 2];
+        for b1 in 0_u16..256 {
+            bytes[0] = b1 as u8;
+            for b2 in 0_u16..256 {
+                bytes[1] = b2 as u8;
+                let mut b64 = vec![0_u8; 4];
+                assert_eq!(4, encode_config_slice(&bytes, STANDARD, &mut b64[..]));
+                let mut v = ::std::vec::Vec::with_capacity(2);
+                v.extend_from_slice(&bytes[..]);
+
+                assert!(base64_to_bytes.insert(b64, v).is_none());
+            }
+        }
+
+        // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol
+
+        let mut symbols = [0_u8; 4];
+        for &s1 in STANDARD.char_set.encode_table().iter() {
+            symbols[0] = s1;
+            for &s2 in STANDARD.char_set.encode_table().iter() {
+                symbols[1] = s2;
+                for &s3 in STANDARD.char_set.encode_table().iter() {
+                    symbols[2] = s3;
+                    symbols[3] = PAD_BYTE;
+
+                    match base64_to_bytes.get(&symbols[..]) {
+                        Some(bytes) => {
+                            assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD))
+                        }
+                        None => assert_eq!(
+                            Err(DecodeError::InvalidLastSymbol(2, s3)),
+                            decode_config(&symbols[..], STANDARD)
+                        ),
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn detect_invalid_last_symbol_every_possible_two_symbols() {
+        let mut base64_to_bytes = ::std::collections::HashMap::new();
+
+        for b in 0_u16..256 {
+            let mut b64 = vec![0_u8; 4];
+            assert_eq!(4, encode_config_slice(&[b as u8], STANDARD, &mut b64[..]));
+            let mut v = ::std::vec::Vec::with_capacity(1);
+            v.push(b as u8);
+
+            assert!(base64_to_bytes.insert(b64, v).is_none());
+        }
+
+        // every possible combination of symbols must either decode to 1 byte or get InvalidLastSymbol
+
+        let mut symbols = [0_u8; 4];
+        for &s1 in STANDARD.char_set.encode_table().iter() {
+            symbols[0] = s1;
+            for &s2 in STANDARD.char_set.encode_table().iter() {
+                symbols[1] = s2;
+                symbols[2] = PAD_BYTE;
+                symbols[3] = PAD_BYTE;
+
+                match base64_to_bytes.get(&symbols[..]) {
+                    Some(bytes) => {
+                        assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD))
+                    }
+                    None => assert_eq!(
+                        Err(DecodeError::InvalidLastSymbol(1, s2)),
+                        decode_config(&symbols[..], STANDARD)
+                    ),
+                }
+            }
+        }
+    }
+}

+ 88 - 0
zeroidc/vendor/base64-0.13.0/src/display.rs

@@ -0,0 +1,88 @@
+//! Enables base64'd output anywhere you might use a `Display` implementation, like a format string.
+//!
+//! ```
+//! use base64::display::Base64Display;
+//!
+//! let data = vec![0x0, 0x1, 0x2, 0x3];
+//! let wrapper = Base64Display::with_config(&data, base64::STANDARD);
+//!
+//! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper));
+//! ```
+
+use super::chunked_encoder::ChunkedEncoder;
+use super::Config;
+use core::fmt::{Display, Formatter};
+use core::{fmt, str};
+
+/// A convenience wrapper for base64'ing bytes into a format string without heap allocation.
+pub struct Base64Display<'a> {
+    bytes: &'a [u8],
+    chunked_encoder: ChunkedEncoder,
+}
+
+impl<'a> Base64Display<'a> {
+    /// Create a `Base64Display` with the provided config.
+    pub fn with_config(bytes: &[u8], config: Config) -> Base64Display {
+        Base64Display {
+            bytes,
+            chunked_encoder: ChunkedEncoder::new(config),
+        }
+    }
+}
+
+impl<'a> Display for Base64Display<'a> {
+    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
+        let mut sink = FormatterSink { f: formatter };
+        self.chunked_encoder.encode(self.bytes, &mut sink)
+    }
+}
+
+struct FormatterSink<'a, 'b: 'a> {
+    f: &'a mut Formatter<'b>,
+}
+
+impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> {
+    type Error = fmt::Error;
+
+    fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error> {
+        // Avoid unsafe. If max performance is needed, write your own display wrapper that uses
+        // unsafe here to gain about 10-15%.
+        self.f
+            .write_str(str::from_utf8(encoded).expect("base64 data was not utf8"))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::super::chunked_encoder::tests::{
+        chunked_encode_matches_normal_encode_random, SinkTestHelper,
+    };
+    use super::super::*;
+    use super::*;
+
+    #[test]
+    fn basic_display() {
+        assert_eq!(
+            "~$Zm9vYmFy#*",
+            format!("~${}#*", Base64Display::with_config(b"foobar", STANDARD))
+        );
+        assert_eq!(
+            "~$Zm9vYmFyZg==#*",
+            format!("~${}#*", Base64Display::with_config(b"foobarf", STANDARD))
+        );
+    }
+
+    #[test]
+    fn display_encode_matches_normal_encode() {
+        let helper = DisplaySinkTestHelper;
+        chunked_encode_matches_normal_encode_random(&helper);
+    }
+
+    struct DisplaySinkTestHelper;
+
+    impl SinkTestHelper for DisplaySinkTestHelper {
+        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String {
+            format!("{}", Base64Display::with_config(bytes, config))
+        }
+    }
+}

+ 675 - 0
zeroidc/vendor/base64-0.13.0/src/encode.rs

@@ -0,0 +1,675 @@
+use crate::{Config, PAD_BYTE};
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use crate::{chunked_encoder, STANDARD};
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use alloc::{string::String, vec};
+use core::convert::TryInto;
+
+///Encode arbitrary octets as base64.
+///Returns a String.
+///Convenience for `encode_config(input, base64::STANDARD);`.
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let b64 = base64::encode(b"hello world");
+///    println!("{}", b64);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn encode<T: AsRef<[u8]>>(input: T) -> String {
+    encode_config(input, STANDARD)
+}
+
+///Encode arbitrary octets as base64.
+///Returns a String.
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let b64 = base64::encode_config(b"hello world~", base64::STANDARD);
+///    println!("{}", b64);
+///
+///    let b64_url = base64::encode_config(b"hello internet~", base64::URL_SAFE);
+///    println!("{}", b64_url);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn encode_config<T: AsRef<[u8]>>(input: T, config: Config) -> String {
+    let mut buf = match encoded_size(input.as_ref().len(), config) {
+        Some(n) => vec![0; n],
+        None => panic!("integer overflow when calculating buffer size"),
+    };
+
+    encode_with_padding(input.as_ref(), config, buf.len(), &mut buf[..]);
+
+    String::from_utf8(buf).expect("Invalid UTF8")
+}
+
+///Encode arbitrary octets as base64.
+///Writes into the supplied output buffer, which will grow the buffer if needed.
+///
+///# Example
+///
+///```rust
+///extern crate base64;
+///
+///fn main() {
+///    let mut buf = String::new();
+///    base64::encode_config_buf(b"hello world~", base64::STANDARD, &mut buf);
+///    println!("{}", buf);
+///
+///    buf.clear();
+///    base64::encode_config_buf(b"hello internet~", base64::URL_SAFE, &mut buf);
+///    println!("{}", buf);
+///}
+///```
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub fn encode_config_buf<T: AsRef<[u8]>>(input: T, config: Config, buf: &mut String) {
+    let input_bytes = input.as_ref();
+
+    {
+        let mut sink = chunked_encoder::StringSink::new(buf);
+        let encoder = chunked_encoder::ChunkedEncoder::new(config);
+
+        encoder
+            .encode(input_bytes, &mut sink)
+            .expect("Writing to a String shouldn't fail")
+    }
+}
+
+/// Encode arbitrary octets as base64.
+/// Writes into the supplied output buffer.
+///
+/// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
+/// or statically-allocated buffer).
+///
+/// # Panics
+///
+/// If `output` is too small to hold the encoded version of `input`, a panic will result.
+///
+/// # Example
+///
+/// ```rust
+/// extern crate base64;
+///
+/// fn main() {
+///     let s = b"hello internet!";
+///     let mut buf = Vec::new();
+///     // make sure we'll have a slice big enough for base64 + padding
+///     buf.resize(s.len() * 4 / 3 + 4, 0);
+///
+///     let bytes_written = base64::encode_config_slice(s,
+///                             base64::STANDARD, &mut buf);
+///
+///     // shorten our vec down to just what was written
+///     buf.resize(bytes_written, 0);
+///
+///     assert_eq!(s, base64::decode(&buf).unwrap().as_slice());
+/// }
+/// ```
+pub fn encode_config_slice<T: AsRef<[u8]>>(input: T, config: Config, output: &mut [u8]) -> usize {
+    let input_bytes = input.as_ref();
+
+    let encoded_size = encoded_size(input_bytes.len(), config)
+        .expect("usize overflow when calculating buffer size");
+
+    let mut b64_output = &mut output[0..encoded_size];
+
+    encode_with_padding(&input_bytes, config, encoded_size, &mut b64_output);
+
+    encoded_size
+}
+
+/// B64-encode and pad (if configured).
+///
+/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
+/// inputs.
+///
+/// `encoded_size` is the encoded size calculated for `input`.
+///
+/// `output` must be of size `encoded_size`.
+///
+/// All bytes in `output` will be written to since it is exactly the size of the output.
+fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output: &mut [u8]) {
+    debug_assert_eq!(encoded_size, output.len());
+
+    let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table());
+
+    let padding_bytes = if config.pad {
+        add_padding(input.len(), &mut output[b64_bytes_written..])
+    } else {
+        0
+    };
+
+    let encoded_bytes = b64_bytes_written
+        .checked_add(padding_bytes)
+        .expect("usize overflow when calculating b64 length");
+
+    debug_assert_eq!(encoded_size, encoded_bytes);
+}
+
+#[inline]
+fn read_u64(s: &[u8]) -> u64 {
+    u64::from_be_bytes(s[..8].try_into().unwrap())
+}
+
+/// Encode input bytes to utf8 base64 bytes. Does not pad.
+/// `output` must be long enough to hold the encoded `input` without padding.
+/// Returns the number of bytes written.
+#[inline]
+pub fn encode_to_slice(input: &[u8], output: &mut [u8], encode_table: &[u8; 64]) -> usize {
+    let mut input_index: usize = 0;
+
+    const BLOCKS_PER_FAST_LOOP: usize = 4;
+    const LOW_SIX_BITS: u64 = 0x3F;
+
+    // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
+    // 2 trailing bytes to be available to read..
+    let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
+    let mut output_index = 0;
+
+    if last_fast_index > 0 {
+        while input_index <= last_fast_index {
+            // Major performance wins from letting the optimizer do the bounds check once, mostly
+            // on the output side
+            let input_chunk = &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
+            let output_chunk = &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];
+
+            // Hand-unrolling for 32 vs 16 or 8 bytes produces yields performance about equivalent
+            // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
+            // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
+            // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
+            // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
+            // Plus, single-digit percentage performance differences might well be quite different
+            // on different hardware.
+
+            let input_u64 = read_u64(&input_chunk[0..]);
+
+            output_chunk[0] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+            output_chunk[1] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+            output_chunk[2] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+            output_chunk[3] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+            output_chunk[4] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+            output_chunk[5] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+            output_chunk[6] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+            output_chunk[7] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+            let input_u64 = read_u64(&input_chunk[6..]);
+
+            output_chunk[8] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+            output_chunk[9] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+            output_chunk[10] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+            output_chunk[11] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+            output_chunk[12] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+            output_chunk[13] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+            output_chunk[14] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+            output_chunk[15] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+            let input_u64 = read_u64(&input_chunk[12..]);
+
+            output_chunk[16] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+            output_chunk[17] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+            output_chunk[18] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+            output_chunk[19] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+            output_chunk[20] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+            output_chunk[21] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+            output_chunk[22] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+            output_chunk[23] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+            let input_u64 = read_u64(&input_chunk[18..]);
+
+            output_chunk[24] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+            output_chunk[25] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+            output_chunk[26] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+            output_chunk[27] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+            output_chunk[28] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+            output_chunk[29] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+            output_chunk[30] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+            output_chunk[31] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+            output_index += BLOCKS_PER_FAST_LOOP * 8;
+            input_index += BLOCKS_PER_FAST_LOOP * 6;
+        }
+    }
+
+    // Encode what's left after the fast loop.
+
+    const LOW_SIX_BITS_U8: u8 = 0x3F;
+
+    let rem = input.len() % 3;
+    let start_of_rem = input.len() - rem;
+
+    // start at the first index not handled by fast loop, which may be 0.
+
+    while input_index < start_of_rem {
+        let input_chunk = &input[input_index..(input_index + 3)];
+        let output_chunk = &mut output[output_index..(output_index + 4)];
+
+        output_chunk[0] = encode_table[(input_chunk[0] >> 2) as usize];
+        output_chunk[1] =
+            encode_table[((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
+        output_chunk[2] =
+            encode_table[((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
+        output_chunk[3] = encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];
+
+        input_index += 3;
+        output_index += 4;
+    }
+
+    if rem == 2 {
+        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
+        output[output_index + 1] = encode_table[((input[start_of_rem] << 4
+            | input[start_of_rem + 1] >> 4)
+            & LOW_SIX_BITS_U8) as usize];
+        output[output_index + 2] =
+            encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
+        output_index += 3;
+    } else if rem == 1 {
+        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
+        output[output_index + 1] =
+            encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
+        output_index += 2;
+    }
+
+    output_index
+}
+
+/// calculate the base64 encoded string size, including padding if appropriate
+pub fn encoded_size(bytes_len: usize, config: Config) -> Option<usize> {
+    let rem = bytes_len % 3;
+
+    let complete_input_chunks = bytes_len / 3;
+    let complete_chunk_output = complete_input_chunks.checked_mul(4);
+
+    if rem > 0 {
+        if config.pad {
+            complete_chunk_output.and_then(|c| c.checked_add(4))
+        } else {
+            let encoded_rem = match rem {
+                1 => 2,
+                2 => 3,
+                _ => unreachable!("Impossible remainder"),
+            };
+            complete_chunk_output.and_then(|c| c.checked_add(encoded_rem))
+        }
+    } else {
+        complete_chunk_output
+    }
+}
+
+/// Write padding characters.
+/// `output` is the slice where padding should be written, of length at least 2.
+///
+/// Returns the number of padding bytes written.
+pub fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
+    let rem = input_len % 3;
+    let mut bytes_written = 0;
+    for _ in 0..((3 - rem) % 3) {
+        output[bytes_written] = PAD_BYTE;
+        bytes_written += 1;
+    }
+
+    bytes_written
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        decode::decode_config_buf,
+        tests::{assert_encode_sanity, random_config},
+        Config, STANDARD, URL_SAFE_NO_PAD,
+    };
+
+    use rand::{
+        distributions::{Distribution, Uniform},
+        FromEntropy, Rng,
+    };
+    use std;
+    use std::str;
+
+    #[test]
+    fn encoded_size_correct_standard() {
+        assert_encoded_length(0, 0, STANDARD);
+
+        assert_encoded_length(1, 4, STANDARD);
+        assert_encoded_length(2, 4, STANDARD);
+        assert_encoded_length(3, 4, STANDARD);
+
+        assert_encoded_length(4, 8, STANDARD);
+        assert_encoded_length(5, 8, STANDARD);
+        assert_encoded_length(6, 8, STANDARD);
+
+        assert_encoded_length(7, 12, STANDARD);
+        assert_encoded_length(8, 12, STANDARD);
+        assert_encoded_length(9, 12, STANDARD);
+
+        assert_encoded_length(54, 72, STANDARD);
+
+        assert_encoded_length(55, 76, STANDARD);
+        assert_encoded_length(56, 76, STANDARD);
+        assert_encoded_length(57, 76, STANDARD);
+
+        assert_encoded_length(58, 80, STANDARD);
+    }
+
+    #[test]
+    fn encoded_size_correct_no_pad() {
+        assert_encoded_length(0, 0, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(1, 2, URL_SAFE_NO_PAD);
+        assert_encoded_length(2, 3, URL_SAFE_NO_PAD);
+        assert_encoded_length(3, 4, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(4, 6, URL_SAFE_NO_PAD);
+        assert_encoded_length(5, 7, URL_SAFE_NO_PAD);
+        assert_encoded_length(6, 8, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(7, 10, URL_SAFE_NO_PAD);
+        assert_encoded_length(8, 11, URL_SAFE_NO_PAD);
+        assert_encoded_length(9, 12, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(54, 72, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(55, 74, URL_SAFE_NO_PAD);
+        assert_encoded_length(56, 75, URL_SAFE_NO_PAD);
+        assert_encoded_length(57, 76, URL_SAFE_NO_PAD);
+
+        assert_encoded_length(58, 78, URL_SAFE_NO_PAD);
+    }
+
+    #[test]
+    fn encoded_size_overflow() {
+        assert_eq!(None, encoded_size(std::usize::MAX, STANDARD));
+    }
+
+    #[test]
+    fn encode_config_buf_into_nonempty_buffer_doesnt_clobber_prefix() {
+        let mut orig_data = Vec::new();
+        let mut prefix = String::new();
+        let mut encoded_data_no_prefix = String::new();
+        let mut encoded_data_with_prefix = String::new();
+        let mut decoded = Vec::new();
+
+        let prefix_len_range = Uniform::new(0, 1000);
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            prefix.clear();
+            encoded_data_no_prefix.clear();
+            encoded_data_with_prefix.clear();
+            decoded.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            let prefix_len = prefix_len_range.sample(&mut rng);
+            for _ in 0..prefix_len {
+                // getting convenient random single-byte printable chars that aren't base64 is
+                // annoying
+                prefix.push('#');
+            }
+            encoded_data_with_prefix.push_str(&prefix);
+
+            let config = random_config(&mut rng);
+            encode_config_buf(&orig_data, config, &mut encoded_data_no_prefix);
+            encode_config_buf(&orig_data, config, &mut encoded_data_with_prefix);
+
+            assert_eq!(
+                encoded_data_no_prefix.len() + prefix_len,
+                encoded_data_with_prefix.len()
+            );
+            assert_encode_sanity(&encoded_data_no_prefix, config, input_len);
+            assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], config, input_len);
+
+            // append plain encode onto prefix
+            prefix.push_str(&mut encoded_data_no_prefix);
+
+            assert_eq!(prefix, encoded_data_with_prefix);
+
+            decode_config_buf(&encoded_data_no_prefix, config, &mut decoded).unwrap();
+            assert_eq!(orig_data, decoded);
+        }
+    }
+
+    #[test]
+    fn encode_config_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
+        let mut orig_data = Vec::new();
+        let mut encoded_data = Vec::new();
+        let mut encoded_data_original_state = Vec::new();
+        let mut decoded = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            encoded_data.clear();
+            encoded_data_original_state.clear();
+            decoded.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            // plenty of existing garbage in the encoded buffer
+            for _ in 0..10 * input_len {
+                encoded_data.push(rng.gen());
+            }
+
+            encoded_data_original_state.extend_from_slice(&encoded_data);
+
+            let config = random_config(&mut rng);
+
+            let encoded_size = encoded_size(input_len, config).unwrap();
+
+            assert_eq!(
+                encoded_size,
+                encode_config_slice(&orig_data, config, &mut encoded_data)
+            );
+
+            assert_encode_sanity(
+                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
+                config,
+                input_len,
+            );
+
+            assert_eq!(
+                &encoded_data[encoded_size..],
+                &encoded_data_original_state[encoded_size..]
+            );
+
+            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
+            assert_eq!(orig_data, decoded);
+        }
+    }
+
+    #[test]
+    fn encode_config_slice_fits_into_precisely_sized_slice() {
+        let mut orig_data = Vec::new();
+        let mut encoded_data = Vec::new();
+        let mut decoded = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            orig_data.clear();
+            encoded_data.clear();
+            decoded.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                orig_data.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+
+            let encoded_size = encoded_size(input_len, config).unwrap();
+
+            encoded_data.resize(encoded_size, 0);
+
+            assert_eq!(
+                encoded_size,
+                encode_config_slice(&orig_data, config, &mut encoded_data)
+            );
+
+            assert_encode_sanity(
+                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
+                config,
+                input_len,
+            );
+
+            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
+            assert_eq!(orig_data, decoded);
+        }
+    }
+
+    #[test]
+    fn encode_to_slice_random_valid_utf8() {
+        let mut input = Vec::new();
+        let mut output = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            input.clear();
+            output.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                input.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+
+            // fill up the output buffer with garbage
+            let encoded_size = encoded_size(input_len, config).unwrap();
+            for _ in 0..encoded_size {
+                output.push(rng.gen());
+            }
+
+            let orig_output_buf = output.to_vec();
+
+            let bytes_written =
+                encode_to_slice(&input, &mut output, config.char_set.encode_table());
+
+            // make sure the part beyond bytes_written is the same garbage it was before
+            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
+
+            // make sure the encoded bytes are UTF-8
+            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
+        }
+    }
+
+    #[test]
+    fn encode_with_padding_random_valid_utf8() {
+        let mut input = Vec::new();
+        let mut output = Vec::new();
+
+        let input_len_range = Uniform::new(0, 1000);
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..10_000 {
+            input.clear();
+            output.clear();
+
+            let input_len = input_len_range.sample(&mut rng);
+
+            for _ in 0..input_len {
+                input.push(rng.gen());
+            }
+
+            let config = random_config(&mut rng);
+
+            // fill up the output buffer with garbage
+            let encoded_size = encoded_size(input_len, config).unwrap();
+            for _ in 0..encoded_size + 1000 {
+                output.push(rng.gen());
+            }
+
+            let orig_output_buf = output.to_vec();
+
+            encode_with_padding(&input, config, encoded_size, &mut output[0..encoded_size]);
+
+            // make sure the part beyond b64 is the same garbage it was before
+            assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);
+
+            // make sure the encoded bytes are UTF-8
+            let _ = str::from_utf8(&output[0..encoded_size]).unwrap();
+        }
+    }
+
+    #[test]
+    fn add_padding_random_valid_utf8() {
+        let mut output = Vec::new();
+
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        // cover our bases for length % 3
+        for input_len in 0..10 {
+            output.clear();
+
+            // fill output with random
+            for _ in 0..10 {
+                output.push(rng.gen());
+            }
+
+            let orig_output_buf = output.to_vec();
+
+            let bytes_written = add_padding(input_len, &mut output);
+
+            // make sure the part beyond bytes_written is the same garbage it was before
+            assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
+
+            // make sure the encoded bytes are UTF-8
+            let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
+        }
+    }
+
+    fn assert_encoded_length(input_len: usize, encoded_len: usize, config: Config) {
+        assert_eq!(encoded_len, encoded_size(input_len, config).unwrap());
+
+        let mut bytes: Vec<u8> = Vec::new();
+        let mut rng = rand::rngs::SmallRng::from_entropy();
+
+        for _ in 0..input_len {
+            bytes.push(rng.gen());
+        }
+
+        let encoded = encode_config(&bytes, config);
+        assert_encode_sanity(&encoded, config, input_len);
+
+        assert_eq!(encoded_len, encoded.len());
+    }
+
+    #[test]
+    fn encode_imap() {
+        assert_eq!(
+            encode_config(b"\xFB\xFF", crate::IMAP_MUTF7),
+            encode_config(b"\xFB\xFF", crate::STANDARD_NO_PAD).replace("/", ",")
+        );
+    }
+}

+ 245 - 0
zeroidc/vendor/base64-0.13.0/src/lib.rs

@@ -0,0 +1,245 @@
+//! # Configs
+//!
+//! There isn't just one type of Base64; that would be too simple. You need to choose a character
+//! set (standard, URL-safe, etc) and padding suffix (yes/no).
+//! The `Config` struct encapsulates this info. There are some common configs included: `STANDARD`,
+//! `URL_SAFE`, etc. You can also make your own `Config` if needed.
+//!
+//! The functions that don't have `config` in the name (e.g. `encode()` and `decode()`) use the
+//! `STANDARD` config .
+//!
+//! The functions that write to a slice (the ones that end in `_slice`) are generally the fastest
+//! because they don't need to resize anything. If it fits in your workflow and you care about
+//! performance, keep using the same buffer (growing as need be) and use the `_slice` methods for
+//! the best performance.
+//!
+//! # Encoding
+//!
+//! Several different encoding functions are available to you depending on your desire for
+//! convenience vs performance.
+//!
+//! | Function                | Output                       | Allocates                      |
+//! | ----------------------- | ---------------------------- | ------------------------------ |
+//! | `encode`                | Returns a new `String`       | Always                         |
+//! | `encode_config`         | Returns a new `String`       | Always                         |
+//! | `encode_config_buf`     | Appends to provided `String` | Only if `String` needs to grow |
+//! | `encode_config_slice`   | Writes to provided `&[u8]`   | Never                          |
+//!
+//! All of the encoding functions that take a `Config` will pad as per the config.
+//!
+//! # Decoding
+//!
+//! Just as for encoding, there are different decoding functions available.
+//!
+//! | Function                | Output                        | Allocates                      |
+//! | ----------------------- | ----------------------------- | ------------------------------ |
+//! | `decode`                | Returns a new `Vec<u8>`       | Always                         |
+//! | `decode_config`         | Returns a new `Vec<u8>`       | Always                         |
+//! | `decode_config_buf`     | Appends to provided `Vec<u8>` | Only if `Vec` needs to grow    |
+//! | `decode_config_slice`   | Writes to provided `&[u8]`    | Never                          |
+//!
+//! Unlike encoding, where all possible input is valid, decoding can fail (see `DecodeError`).
+//!
+//! Input can be invalid because it has invalid characters or invalid padding. (No padding at all is
+//! valid, but excess padding is not.) Whitespace in the input is invalid.
+//!
+//! # `Read` and `Write`
+//!
+//! To map a `Read` of b64 bytes to the decoded bytes, wrap a reader (file, network socket, etc)
+//! with `base64::read::DecoderReader`. To write raw bytes and have them b64 encoded on the fly,
+//! wrap a writer with `base64::write::EncoderWriter`. There is some performance overhead (15% or
+//! so) because of the necessary buffer shuffling -- still fast enough that almost nobody cares.
+//! Also, these implementations do not heap allocate.
+//!
+//! # Panics
+//!
+//! If length calculations result in overflowing `usize`, a panic will result.
+//!
+//! The `_slice` flavors of encode or decode will panic if the provided output slice is too small,
+
+#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
+#![deny(
+    missing_docs,
+    trivial_casts,
+    trivial_numeric_casts,
+    unused_extern_crates,
+    unused_import_braces,
+    unused_results,
+    variant_size_differences,
+    warnings
+)]
+#![forbid(unsafe_code)]
+#![cfg_attr(not(any(feature = "std", test)), no_std)]
+
+#[cfg(all(feature = "alloc", not(any(feature = "std", test))))]
+extern crate alloc;
+#[cfg(any(feature = "std", test))]
+extern crate std as alloc;
+
+mod chunked_encoder;
+pub mod display;
+#[cfg(any(feature = "std", test))]
+pub mod read;
+mod tables;
+#[cfg(any(feature = "std", test))]
+pub mod write;
+
+mod encode;
+pub use crate::encode::encode_config_slice;
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub use crate::encode::{encode, encode_config, encode_config_buf};
+
+mod decode;
+#[cfg(any(feature = "alloc", feature = "std", test))]
+pub use crate::decode::{decode, decode_config, decode_config_buf};
+pub use crate::decode::{decode_config_slice, DecodeError};
+
+#[cfg(test)]
+mod tests;
+
+/// Available encoding character sets
+#[derive(Clone, Copy, Debug)]
+pub enum CharacterSet {
+    /// The standard character set (uses `+` and `/`).
+    ///
+    /// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-3).
+    Standard,
+    /// The URL safe character set (uses `-` and `_`).
+    ///
+    /// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-4).
+    UrlSafe,
+    /// The `crypt(3)` character set (uses `./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`).
+    ///
+    /// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses.
+    Crypt,
+    /// The bcrypt character set (uses `./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789`).
+    Bcrypt,
+    /// The character set used in IMAP-modified UTF-7 (uses `+` and `,`).
+    ///
+    /// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3)
+    ImapMutf7,
+    /// The character set used in BinHex 4.0 files.
+    ///
+    /// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
+    BinHex,
+}
+
+impl CharacterSet {
+    fn encode_table(self) -> &'static [u8; 64] {
+        match self {
+            CharacterSet::Standard => tables::STANDARD_ENCODE,
+            CharacterSet::UrlSafe => tables::URL_SAFE_ENCODE,
+            CharacterSet::Crypt => tables::CRYPT_ENCODE,
+            CharacterSet::Bcrypt => tables::BCRYPT_ENCODE,
+            CharacterSet::ImapMutf7 => tables::IMAP_MUTF7_ENCODE,
+            CharacterSet::BinHex => tables::BINHEX_ENCODE,
+        }
+    }
+
+    fn decode_table(self) -> &'static [u8; 256] {
+        match self {
+            CharacterSet::Standard => tables::STANDARD_DECODE,
+            CharacterSet::UrlSafe => tables::URL_SAFE_DECODE,
+            CharacterSet::Crypt => tables::CRYPT_DECODE,
+            CharacterSet::Bcrypt => tables::BCRYPT_DECODE,
+            CharacterSet::ImapMutf7 => tables::IMAP_MUTF7_DECODE,
+            CharacterSet::BinHex => tables::BINHEX_DECODE,
+        }
+    }
+}
+
+/// Contains configuration parameters for base64 encoding
+#[derive(Clone, Copy, Debug)]
+pub struct Config {
+    /// Character set to use
+    char_set: CharacterSet,
+    /// True to pad output with `=` characters
+    pad: bool,
+    /// True to ignore excess nonzero bits in the last few symbols, otherwise an error is returned.
+    decode_allow_trailing_bits: bool,
+}
+
+impl Config {
+    /// Create a new `Config`.
+    pub const fn new(char_set: CharacterSet, pad: bool) -> Config {
+        Config {
+            char_set,
+            pad,
+            decode_allow_trailing_bits: false,
+        }
+    }
+
+    /// Sets whether to pad output with `=` characters.
+    pub const fn pad(self, pad: bool) -> Config {
+        Config { pad, ..self }
+    }
+
+    /// Sets whether to emit errors for nonzero trailing bits.
+    ///
+    /// This is useful when implementing
+    /// [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
+    pub const fn decode_allow_trailing_bits(self, allow: bool) -> Config {
+        Config {
+            decode_allow_trailing_bits: allow,
+            ..self
+        }
+    }
+}
+
+/// Standard character set with padding.
+pub const STANDARD: Config = Config {
+    char_set: CharacterSet::Standard,
+    pad: true,
+    decode_allow_trailing_bits: false,
+};
+
+/// Standard character set without padding.
+pub const STANDARD_NO_PAD: Config = Config {
+    char_set: CharacterSet::Standard,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+/// URL-safe character set with padding
+pub const URL_SAFE: Config = Config {
+    char_set: CharacterSet::UrlSafe,
+    pad: true,
+    decode_allow_trailing_bits: false,
+};
+
+/// URL-safe character set without padding
+pub const URL_SAFE_NO_PAD: Config = Config {
+    char_set: CharacterSet::UrlSafe,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+/// As per `crypt(3)` requirements
+pub const CRYPT: Config = Config {
+    char_set: CharacterSet::Crypt,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+/// Bcrypt character set
+pub const BCRYPT: Config = Config {
+    char_set: CharacterSet::Bcrypt,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+/// IMAP modified UTF-7 requirements
+pub const IMAP_MUTF7: Config = Config {
+    char_set: CharacterSet::ImapMutf7,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+/// BinHex character set
+pub const BINHEX: Config = Config {
+    char_set: CharacterSet::BinHex,
+    pad: false,
+    decode_allow_trailing_bits: false,
+};
+
+const PAD_BYTE: u8 = b'=';

+ 282 - 0
zeroidc/vendor/base64-0.13.0/src/read/decoder.rs

@@ -0,0 +1,282 @@
+use crate::{decode_config_slice, Config, DecodeError};
+use std::io::Read;
+use std::{cmp, fmt, io};
+
+// This should be large, but it has to fit on the stack.
+pub(crate) const BUF_SIZE: usize = 1024;
+
+// 4 bytes of base64 data encode 3 bytes of raw data (modulo padding).
+const BASE64_CHUNK_SIZE: usize = 4;
+const DECODED_CHUNK_SIZE: usize = 3;
+
+/// A `Read` implementation that decodes base64 data read from an underlying reader.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::Read;
+/// use std::io::Cursor;
+///
+/// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc.
+/// let mut wrapped_reader = Cursor::new(b"YXNkZg==");
+/// let mut decoder = base64::read::DecoderReader::new(
+///     &mut wrapped_reader, base64::STANDARD);
+///
+/// // handle errors as you normally would
+/// let mut result = Vec::new();
+/// decoder.read_to_end(&mut result).unwrap();
+///
+/// assert_eq!(b"asdf", &result[..]);
+///
+/// ```
+pub struct DecoderReader<'a, R: 'a + io::Read> {
+    config: Config,
+    /// Where b64 data is read from
+    r: &'a mut R,
+
+    // Holds b64 data read from the delegate reader.
+    b64_buffer: [u8; BUF_SIZE],
+    // The start of the pending buffered data in b64_buffer.
+    b64_offset: usize,
+    // The amount of buffered b64 data.
+    b64_len: usize,
+    // Since the caller may provide us with a buffer of size 1 or 2 that's too small to copy a
+    // decoded chunk in to, we have to be able to hang on to a few decoded bytes.
+    // Technically we only need to hold 2 bytes but then we'd need a separate temporary buffer to
+    // decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest
+    // into here, which seems like a lot of complexity for 1 extra byte of storage.
+    decoded_buffer: [u8; 3],
+    // index of start of decoded data
+    decoded_offset: usize,
+    // length of decoded data
+    decoded_len: usize,
+    // used to provide accurate offsets in errors
+    total_b64_decoded: usize,
+}
+
+impl<'a, R: io::Read> fmt::Debug for DecoderReader<'a, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("DecoderReader")
+            .field("config", &self.config)
+            .field("b64_offset", &self.b64_offset)
+            .field("b64_len", &self.b64_len)
+            .field("decoded_buffer", &self.decoded_buffer)
+            .field("decoded_offset", &self.decoded_offset)
+            .field("decoded_len", &self.decoded_len)
+            .field("total_b64_decoded", &self.total_b64_decoded)
+            .finish()
+    }
+}
+
+impl<'a, R: io::Read> DecoderReader<'a, R> {
+    /// Create a new decoder that will read from the provided reader `r`.
+    pub fn new(r: &'a mut R, config: Config) -> Self {
+        DecoderReader {
+            config,
+            r,
+            b64_buffer: [0; BUF_SIZE],
+            b64_offset: 0,
+            b64_len: 0,
+            decoded_buffer: [0; DECODED_CHUNK_SIZE],
+            decoded_offset: 0,
+            decoded_len: 0,
+            total_b64_decoded: 0,
+        }
+    }
+
+    /// Write as much as possible of the decoded buffer into the target buffer.
+    /// Must only be called when there is something to write and space to write into.
+    /// Returns a Result with the number of (decoded) bytes copied.
+    fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        debug_assert!(self.decoded_len > 0);
+        debug_assert!(buf.len() > 0);
+
+        let copy_len = cmp::min(self.decoded_len, buf.len());
+        debug_assert!(copy_len > 0);
+        debug_assert!(copy_len <= self.decoded_len);
+
+        buf[..copy_len].copy_from_slice(
+            &self.decoded_buffer[self.decoded_offset..self.decoded_offset + copy_len],
+        );
+
+        self.decoded_offset += copy_len;
+        self.decoded_len -= copy_len;
+
+        debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
+
+        Ok(copy_len)
+    }
+
+    /// Read into the remaining space in the buffer after the current contents.
+    /// Must only be called when there is space to read into in the buffer.
+    /// Returns the number of bytes read.
+    fn read_from_delegate(&mut self) -> io::Result<usize> {
+        debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE);
+
+        let read = self
+            .r
+            .read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?;
+        self.b64_len += read;
+
+        debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
+
+        return Ok(read);
+    }
+
+    /// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the
+    /// caller's responsibility to choose the number of b64 bytes to decode correctly.
+    ///
+    /// Returns a Result with the number of decoded bytes written to `buf`.
+    fn decode_to_buf(&mut self, num_bytes: usize, buf: &mut [u8]) -> io::Result<usize> {
+        debug_assert!(self.b64_len >= num_bytes);
+        debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
+        debug_assert!(buf.len() > 0);
+
+        let decoded = decode_config_slice(
+            &self.b64_buffer[self.b64_offset..self.b64_offset + num_bytes],
+            self.config,
+            &mut buf[..],
+        )
+        .map_err(|e| match e {
+            DecodeError::InvalidByte(offset, byte) => {
+                DecodeError::InvalidByte(self.total_b64_decoded + offset, byte)
+            }
+            DecodeError::InvalidLength => DecodeError::InvalidLength,
+            DecodeError::InvalidLastSymbol(offset, byte) => {
+                DecodeError::InvalidLastSymbol(self.total_b64_decoded + offset, byte)
+            }
+        })
+        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+
+        self.total_b64_decoded += num_bytes;
+        self.b64_offset += num_bytes;
+        self.b64_len -= num_bytes;
+
+        debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
+
+        Ok(decoded)
+    }
+}
+
+impl<'a, R: Read> Read for DecoderReader<'a, R> {
+    /// Decode input from the wrapped reader.
+    ///
+    /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
+    /// written in `buf`.
+    ///
+    /// Where possible, this function buffers base64 to minimize the number of read() calls to the
+    /// delegate reader.
+    ///
+    /// # Errors
+    ///
+    /// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid
+    /// base64 are also possible, and will have `io::ErrorKind::InvalidData`.
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        if buf.len() == 0 {
+            return Ok(0);
+        }
+
+        // offset == BUF_SIZE when we copied it all last time
+        debug_assert!(self.b64_offset <= BUF_SIZE);
+        debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
+        debug_assert!(if self.b64_offset == BUF_SIZE {
+            self.b64_len == 0
+        } else {
+            self.b64_len <= BUF_SIZE
+        });
+
+        debug_assert!(if self.decoded_len == 0 {
+            // can be = when we were able to copy the complete chunk
+            self.decoded_offset <= DECODED_CHUNK_SIZE
+        } else {
+            self.decoded_offset < DECODED_CHUNK_SIZE
+        });
+
+        // We shouldn't ever decode into here when we can't immediately write at least one byte into
+        // the provided buf, so the effective length should only be 3 momentarily between when we
+        // decode and when we copy into the target buffer.
+        debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
+        debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE);
+
+        if self.decoded_len > 0 {
+            // we have a few leftover decoded bytes; flush that rather than pull in more b64
+            self.flush_decoded_buf(buf)
+        } else {
+            let mut at_eof = false;
+            while self.b64_len < BASE64_CHUNK_SIZE {
+                // Work around lack of copy_within, which is only present in 1.37
+                // Copy any bytes we have to the start of the buffer.
+                // We know we have < 1 chunk, so we can use a tiny tmp buffer.
+                let mut memmove_buf = [0_u8; BASE64_CHUNK_SIZE];
+                memmove_buf[..self.b64_len].copy_from_slice(
+                    &self.b64_buffer[self.b64_offset..self.b64_offset + self.b64_len],
+                );
+                self.b64_buffer[0..self.b64_len].copy_from_slice(&memmove_buf[..self.b64_len]);
+                self.b64_offset = 0;
+
+                // then fill in more data
+                let read = self.read_from_delegate()?;
+                if read == 0 {
+                    // we never pass in an empty buf, so 0 => we've hit EOF
+                    at_eof = true;
+                    break;
+                }
+            }
+
+            if self.b64_len == 0 {
+                debug_assert!(at_eof);
+                // we must be at EOF, and we have no data left to decode
+                return Ok(0);
+            };
+
+            debug_assert!(if at_eof {
+                // if we are at eof, we may not have a complete chunk
+                self.b64_len > 0
+            } else {
+                // otherwise, we must have at least one chunk
+                self.b64_len >= BASE64_CHUNK_SIZE
+            });
+
+            debug_assert_eq!(0, self.decoded_len);
+
+            if buf.len() < DECODED_CHUNK_SIZE {
+                // caller requested an annoyingly short read
+                // have to write to a tmp buf first to avoid double mutable borrow
+                let mut decoded_chunk = [0_u8; DECODED_CHUNK_SIZE];
+                // if we are at eof, could have less than BASE64_CHUNK_SIZE, in which case we have
+                // to assume that these last few tokens are, in fact, valid (i.e. must be 2-4 b64
+                // tokens, not 1, since 1 token can't decode to 1 byte).
+                let to_decode = cmp::min(self.b64_len, BASE64_CHUNK_SIZE);
+
+                let decoded = self.decode_to_buf(to_decode, &mut decoded_chunk[..])?;
+                self.decoded_buffer[..decoded].copy_from_slice(&decoded_chunk[..decoded]);
+
+                self.decoded_offset = 0;
+                self.decoded_len = decoded;
+
+                // can be less than 3 on last block due to padding
+                debug_assert!(decoded <= 3);
+
+                self.flush_decoded_buf(buf)
+            } else {
+                let b64_bytes_that_can_decode_into_buf = (buf.len() / DECODED_CHUNK_SIZE)
+                    .checked_mul(BASE64_CHUNK_SIZE)
+                    .expect("too many chunks");
+                debug_assert!(b64_bytes_that_can_decode_into_buf >= BASE64_CHUNK_SIZE);
+
+                let b64_bytes_available_to_decode = if at_eof {
+                    self.b64_len
+                } else {
+                    // only use complete chunks
+                    self.b64_len - self.b64_len % 4
+                };
+
+                let actual_decode_len = cmp::min(
+                    b64_bytes_that_can_decode_into_buf,
+                    b64_bytes_available_to_decode,
+                );
+                self.decode_to_buf(actual_decode_len, buf)
+            }
+        }
+    }
+}

+ 335 - 0
zeroidc/vendor/base64-0.13.0/src/read/decoder_tests.rs

@@ -0,0 +1,335 @@
+use std::io::{self, Read};
+
+use rand::{Rng, RngCore};
+use std::{cmp, iter};
+
+use super::decoder::{DecoderReader, BUF_SIZE};
+use crate::encode::encode_config_buf;
+use crate::tests::random_config;
+use crate::{decode_config_buf, DecodeError, STANDARD};
+
+#[test]
+fn simple() {
+    let tests: &[(&[u8], &[u8])] = &[
+        (&b"0"[..], &b"MA=="[..]),
+        (b"01", b"MDE="),
+        (b"012", b"MDEy"),
+        (b"0123", b"MDEyMw=="),
+        (b"01234", b"MDEyMzQ="),
+        (b"012345", b"MDEyMzQ1"),
+        (b"0123456", b"MDEyMzQ1Ng=="),
+        (b"01234567", b"MDEyMzQ1Njc="),
+        (b"012345678", b"MDEyMzQ1Njc4"),
+        (b"0123456789", b"MDEyMzQ1Njc4OQ=="),
+    ][..];
+
+    for (text_expected, base64data) in tests.iter() {
+        // Read n bytes at a time.
+        for n in 1..base64data.len() + 1 {
+            let mut wrapped_reader = io::Cursor::new(base64data);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, STANDARD);
+
+            // handle errors as you normally would
+            let mut text_got = Vec::new();
+            let mut buffer = vec![0u8; n];
+            while let Ok(read) = decoder.read(&mut buffer[..]) {
+                if read == 0 {
+                    break;
+                }
+                text_got.extend_from_slice(&buffer[..read]);
+            }
+
+            assert_eq!(
+                text_got,
+                *text_expected,
+                "\nGot: {}\nExpected: {}",
+                String::from_utf8_lossy(&text_got[..]),
+                String::from_utf8_lossy(text_expected)
+            );
+        }
+    }
+}
+
+// Make sure we error out on trailing junk.
+#[test]
+fn trailing_junk() {
+    let tests: &[&[u8]] = &[&b"MDEyMzQ1Njc4*!@#$%^&"[..], b"MDEyMzQ1Njc4OQ== "][..];
+
+    for base64data in tests.iter() {
+        // Read n bytes at a time.
+        for n in 1..base64data.len() + 1 {
+            let mut wrapped_reader = io::Cursor::new(base64data);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, STANDARD);
+
+            // handle errors as you normally would
+            let mut buffer = vec![0u8; n];
+            let mut saw_error = false;
+            loop {
+                match decoder.read(&mut buffer[..]) {
+                    Err(_) => {
+                        saw_error = true;
+                        break;
+                    }
+                    Ok(read) if read == 0 => break,
+                    Ok(_) => (),
+                }
+            }
+
+            assert!(saw_error);
+        }
+    }
+}
+
+#[test]
+fn handles_short_read_from_delegate() {
+    let mut rng = rand::thread_rng();
+    let mut bytes = Vec::new();
+    let mut b64 = String::new();
+    let mut decoded = Vec::new();
+
+    for _ in 0..10_000 {
+        bytes.clear();
+        b64.clear();
+        decoded.clear();
+
+        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        bytes.extend(iter::repeat(0).take(size));
+        bytes.truncate(size);
+        rng.fill_bytes(&mut bytes[..size]);
+        assert_eq!(size, bytes.len());
+
+        let config = random_config(&mut rng);
+        encode_config_buf(&bytes[..], config, &mut b64);
+
+        let mut wrapped_reader = io::Cursor::new(b64.as_bytes());
+        let mut short_reader = RandomShortRead {
+            delegate: &mut wrapped_reader,
+            rng: &mut rng,
+        };
+
+        let mut decoder = DecoderReader::new(&mut short_reader, config);
+
+        let decoded_len = decoder.read_to_end(&mut decoded).unwrap();
+        assert_eq!(size, decoded_len);
+        assert_eq!(&bytes[..], &decoded[..]);
+    }
+}
+
+#[test]
+fn read_in_short_increments() {
+    let mut rng = rand::thread_rng();
+    let mut bytes = Vec::new();
+    let mut b64 = String::new();
+    let mut decoded = Vec::new();
+
+    for _ in 0..10_000 {
+        bytes.clear();
+        b64.clear();
+        decoded.clear();
+
+        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        bytes.extend(iter::repeat(0).take(size));
+        // leave room to play around with larger buffers
+        decoded.extend(iter::repeat(0).take(size * 3));
+
+        rng.fill_bytes(&mut bytes[..]);
+        assert_eq!(size, bytes.len());
+
+        let config = random_config(&mut rng);
+
+        encode_config_buf(&bytes[..], config, &mut b64);
+
+        let mut wrapped_reader = io::Cursor::new(&b64[..]);
+        let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+
+        consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut decoder);
+    }
+}
+
+#[test]
+fn read_in_short_increments_with_short_delegate_reads() {
+    let mut rng = rand::thread_rng();
+    let mut bytes = Vec::new();
+    let mut b64 = String::new();
+    let mut decoded = Vec::new();
+
+    for _ in 0..10_000 {
+        bytes.clear();
+        b64.clear();
+        decoded.clear();
+
+        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        bytes.extend(iter::repeat(0).take(size));
+        // leave room to play around with larger buffers
+        decoded.extend(iter::repeat(0).take(size * 3));
+
+        rng.fill_bytes(&mut bytes[..]);
+        assert_eq!(size, bytes.len());
+
+        let config = random_config(&mut rng);
+
+        encode_config_buf(&bytes[..], config, &mut b64);
+
+        let mut base_reader = io::Cursor::new(&b64[..]);
+        let mut decoder = DecoderReader::new(&mut base_reader, config);
+        let mut short_reader = RandomShortRead {
+            delegate: &mut decoder,
+            rng: &mut rand::thread_rng(),
+        };
+
+        consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut short_reader)
+    }
+}
+
+#[test]
+fn reports_invalid_last_symbol_correctly() {
+    let mut rng = rand::thread_rng();
+    let mut bytes = Vec::new();
+    let mut b64 = String::new();
+    let mut b64_bytes = Vec::new();
+    let mut decoded = Vec::new();
+    let mut bulk_decoded = Vec::new();
+
+    for _ in 0..1_000 {
+        bytes.clear();
+        b64.clear();
+        b64_bytes.clear();
+
+        let size = rng.gen_range(1, 10 * BUF_SIZE);
+        bytes.extend(iter::repeat(0).take(size));
+        decoded.extend(iter::repeat(0).take(size));
+        rng.fill_bytes(&mut bytes[..]);
+        assert_eq!(size, bytes.len());
+
+        let mut config = random_config(&mut rng);
+        // changing padding will cause invalid padding errors when we twiddle the last byte
+        config.pad = false;
+
+        encode_config_buf(&bytes[..], config, &mut b64);
+        b64_bytes.extend(b64.bytes());
+        assert_eq!(b64_bytes.len(), b64.len());
+
+        // change the last character to every possible symbol. Should behave the same as bulk
+        // decoding whether invalid or valid.
+        for &s1 in config.char_set.encode_table().iter() {
+            decoded.clear();
+            bulk_decoded.clear();
+
+            // replace the last
+            *b64_bytes.last_mut().unwrap() = s1;
+            let bulk_res = decode_config_buf(&b64_bytes[..], config, &mut bulk_decoded);
+
+            let mut wrapped_reader = io::Cursor::new(&b64_bytes[..]);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+
+            let stream_res = decoder.read_to_end(&mut decoded).map(|_| ()).map_err(|e| {
+                e.into_inner()
+                    .and_then(|e| e.downcast::<DecodeError>().ok())
+            });
+
+            assert_eq!(bulk_res.map_err(|e| Some(Box::new(e))), stream_res);
+        }
+    }
+}
+
+#[test]
+fn reports_invalid_byte_correctly() {
+    let mut rng = rand::thread_rng();
+    let mut bytes = Vec::new();
+    let mut b64 = String::new();
+    let mut decoded = Vec::new();
+
+    for _ in 0..10_000 {
+        bytes.clear();
+        b64.clear();
+        decoded.clear();
+
+        let size = rng.gen_range(1, 10 * BUF_SIZE);
+        bytes.extend(iter::repeat(0).take(size));
+        rng.fill_bytes(&mut bytes[..size]);
+        assert_eq!(size, bytes.len());
+
+        let config = random_config(&mut rng);
+        encode_config_buf(&bytes[..], config, &mut b64);
+        // replace one byte, somewhere, with '*', which is invalid
+        let bad_byte_pos = rng.gen_range(0, &b64.len());
+        let mut b64_bytes = b64.bytes().collect::<Vec<u8>>();
+        b64_bytes[bad_byte_pos] = b'*';
+
+        let mut wrapped_reader = io::Cursor::new(b64_bytes.clone());
+        let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+
+        // some gymnastics to avoid double-moving the io::Error, which is not Copy
+        let read_decode_err = decoder
+            .read_to_end(&mut decoded)
+            .map_err(|e| {
+                let kind = e.kind();
+                let inner = e
+                    .into_inner()
+                    .and_then(|e| e.downcast::<DecodeError>().ok());
+                inner.map(|i| (*i, kind))
+            })
+            .err()
+            .and_then(|o| o);
+
+        let mut bulk_buf = Vec::new();
+        let bulk_decode_err = decode_config_buf(&b64_bytes[..], config, &mut bulk_buf).err();
+
+        // it's tricky to predict where the invalid data's offset will be since if it's in the last
+        // chunk it will be reported at the first padding location because it's treated as invalid
+        // padding. So, we just check that it's the same as it is for decoding all at once.
+        assert_eq!(
+            bulk_decode_err.map(|e| (e, io::ErrorKind::InvalidData)),
+            read_decode_err
+        );
+    }
+}
+
+fn consume_with_short_reads_and_validate<R: Read>(
+    rng: &mut rand::rngs::ThreadRng,
+    expected_bytes: &[u8],
+    decoded: &mut Vec<u8>,
+    short_reader: &mut R,
+) -> () {
+    let mut total_read = 0_usize;
+    loop {
+        assert!(
+            total_read <= expected_bytes.len(),
+            "tr {} size {}",
+            total_read,
+            expected_bytes.len()
+        );
+        if total_read == expected_bytes.len() {
+            assert_eq!(expected_bytes, &decoded[..total_read]);
+            // should be done
+            assert_eq!(0, short_reader.read(&mut decoded[..]).unwrap());
+            // didn't write anything
+            assert_eq!(expected_bytes, &decoded[..total_read]);
+
+            break;
+        }
+        let decode_len = rng.gen_range(1, cmp::max(2, expected_bytes.len() * 2));
+
+        let read = short_reader
+            .read(&mut decoded[total_read..total_read + decode_len])
+            .unwrap();
+        total_read += read;
+    }
+}
+
+/// Limits how many bytes a reader will provide in each read call.
+/// Useful for shaking out code that may work fine only with typical input sources that always fill
+/// the buffer.
+struct RandomShortRead<'a, 'b, R: io::Read, N: rand::Rng> {
+    delegate: &'b mut R,
+    rng: &'a mut N,
+}
+
+impl<'a, 'b, R: io::Read, N: rand::Rng> io::Read for RandomShortRead<'a, 'b, R, N> {
+    fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
+        // avoid 0 since it means EOF for non-empty buffers
+        let effective_len = cmp::min(self.rng.gen_range(1, 20), buf.len());
+
+        self.delegate.read(&mut buf[..effective_len])
+    }
+}

+ 6 - 0
zeroidc/vendor/base64-0.13.0/src/read/mod.rs

@@ -0,0 +1,6 @@
+//! Implementations of `io::Read` to transparently decode base64.
+mod decoder;
+pub use self::decoder::DecoderReader;
+
+#[cfg(test)]
+mod decoder_tests;

+ 0 - 0
zeroidc/vendor/base64/src/tables.rs → zeroidc/vendor/base64-0.13.0/src/tables.rs


+ 81 - 0
zeroidc/vendor/base64-0.13.0/src/tests.rs

@@ -0,0 +1,81 @@
+use crate::{decode_config, encode::encoded_size, encode_config_buf, CharacterSet, Config};
+
+use std::str;
+
+use rand::{
+    distributions::{Distribution, Uniform},
+    seq::SliceRandom,
+    FromEntropy, Rng,
+};
+
#[test]
fn roundtrip_random_config_short() {
    // Short buffers hit the slower, partial-chunk encode/decode routines far more often,
    // so hammer them with many small inputs.
    let short_lengths = Uniform::new(0, 50);
    roundtrip_random_config(short_lengths, 10_000);
}

#[test]
fn roundtrip_random_config_long() {
    let long_lengths = Uniform::new(0, 1000);
    roundtrip_random_config(long_lengths, 10_000);
}
+
+pub fn assert_encode_sanity(encoded: &str, config: Config, input_len: usize) {
+    let input_rem = input_len % 3;
+    let expected_padding_len = if input_rem > 0 {
+        if config.pad {
+            3 - input_rem
+        } else {
+            0
+        }
+    } else {
+        0
+    };
+
+    let expected_encoded_len = encoded_size(input_len, config).unwrap();
+
+    assert_eq!(expected_encoded_len, encoded.len());
+
+    let padding_len = encoded.chars().filter(|&c| c == '=').count();
+
+    assert_eq!(expected_padding_len, padding_len);
+
+    let _ = str::from_utf8(encoded.as_bytes()).expect("Base64 should be valid utf8");
+}
+
+fn roundtrip_random_config(input_len_range: Uniform<usize>, iterations: u32) {
+    let mut input_buf: Vec<u8> = Vec::new();
+    let mut encoded_buf = String::new();
+    let mut rng = rand::rngs::SmallRng::from_entropy();
+
+    for _ in 0..iterations {
+        input_buf.clear();
+        encoded_buf.clear();
+
+        let input_len = input_len_range.sample(&mut rng);
+
+        let config = random_config(&mut rng);
+
+        for _ in 0..input_len {
+            input_buf.push(rng.gen());
+        }
+
+        encode_config_buf(&input_buf, config, &mut encoded_buf);
+
+        assert_encode_sanity(&encoded_buf, config, input_len);
+
+        assert_eq!(input_buf, decode_config(&encoded_buf, config).unwrap());
+    }
+}
+
+pub fn random_config<R: Rng>(rng: &mut R) -> Config {
+    const CHARSETS: &[CharacterSet] = &[
+        CharacterSet::UrlSafe,
+        CharacterSet::Standard,
+        CharacterSet::Crypt,
+        CharacterSet::ImapMutf7,
+        CharacterSet::BinHex,
+    ];
+    let charset = *CHARSETS.choose(rng).unwrap();
+
+    Config::new(charset, rng.gen())
+}

+ 381 - 0
zeroidc/vendor/base64-0.13.0/src/write/encoder.rs

@@ -0,0 +1,381 @@
+use crate::encode::encode_to_slice;
+use crate::{encode_config_slice, Config};
+use std::{
+    cmp, fmt,
+    io::{ErrorKind, Result, Write},
+};
+
/// Size in bytes of the fixed on-stack output buffer used by `EncoderWriter`.
pub(crate) const BUF_SIZE: usize = 1024;
/// The most bytes whose encoding will fit in `BUF_SIZE`
const MAX_INPUT_LEN: usize = BUF_SIZE / 4 * 3;
// 3 bytes of input = 4 bytes of base64, always (because we don't allow line wrapping)
const MIN_ENCODE_CHUNK_SIZE: usize = 3;
+
/// A `Write` implementation that base64 encodes data before delegating to the wrapped writer.
///
/// Because base64 has special handling for the end of the input data (padding, etc), there's a
/// `finish()` method on this type that encodes any leftover input bytes and adds padding if
/// appropriate. It's called automatically when deallocated (see the `Drop` implementation), but
/// any error that occurs when invoking the underlying writer will be suppressed. If you want to
/// handle such errors, call `finish()` yourself.
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc.
/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), base64::STANDARD);
///
/// // handle errors as you normally would
/// enc.write_all(b"asdf").unwrap();
///
/// // could leave this out to be called by Drop, if you don't care
/// // about handling errors or getting the delegate writer back
/// let delegate = enc.finish().unwrap();
///
/// // base64 was written to the writer
/// assert_eq!(b"YXNkZg==", &delegate[..]);
///
/// ```
///
/// # Panics
///
/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without
/// error is invalid and will panic.
///
/// # Errors
///
/// Base64 encoding itself does not generate errors, but errors from the wrapped writer will be
/// returned as per the contract of `Write`.
///
/// # Performance
///
/// It has some minor performance loss compared to encoding slices (a couple percent).
/// It does not do any heap allocation.
pub struct EncoderWriter<W: Write> {
    /// Character set and padding choice used for encoding.
    config: Config,
    /// Where encoded data is written to. It's an Option as it's None immediately before Drop is
    /// called so that finish() can return the underlying writer. None implies that finish() has
    /// been called successfully.
    delegate: Option<W>,
    /// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk
    /// with the next `write()`, encode it, then proceed with the rest of the input normally.
    extra_input: [u8; MIN_ENCODE_CHUNK_SIZE],
    /// How much of `extra` is occupied, in `[0, MIN_ENCODE_CHUNK_SIZE]`.
    extra_input_occupied_len: usize,
    /// Buffer to encode into. May hold leftover encoded bytes from a previous write call that the underlying writer
    /// did not write last time.
    output: [u8; BUF_SIZE],
    /// How much of `output` is occupied with encoded data that couldn't be written last time
    output_occupied_len: usize,
    /// panic safety: don't write again in destructor if writer panicked while we were writing to it
    panicked: bool,
}
+
+impl<W: Write> fmt::Debug for EncoderWriter<W> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "extra_input: {:?} extra_input_occupied_len:{:?} output[..5]: {:?} output_occupied_len: {:?}",
+            self.extra_input,
+            self.extra_input_occupied_len,
+            &self.output[0..5],
+            self.output_occupied_len
+        )
+    }
+}
+
impl<W: Write> EncoderWriter<W> {
    /// Create a new encoder that will write to the provided delegate writer `w`.
    pub fn new(w: W, config: Config) -> EncoderWriter<W> {
        EncoderWriter {
            config,
            delegate: Some(w),
            extra_input: [0u8; MIN_ENCODE_CHUNK_SIZE],
            extra_input_occupied_len: 0,
            output: [0u8; BUF_SIZE],
            output_occupied_len: 0,
            panicked: false,
        }
    }

    /// Encode all remaining buffered data and write it, including any trailing incomplete input
    /// triples and associated padding.
    ///
    /// Once this succeeds, no further writes or calls to this method are allowed.
    ///
    /// This may write to the delegate writer multiple times if the delegate writer does not accept
    /// all input provided to its `write` each invocation.
    ///
    /// If you don't care about error handling, it is not necessary to call this function, as the
    /// equivalent finalization is done by the Drop impl.
    ///
    /// Returns the writer that this was constructed around.
    ///
    /// # Errors
    ///
    /// The first error that is not of `ErrorKind::Interrupted` will be returned.
    pub fn finish(&mut self) -> Result<W> {
        // If we could consume self in finish(), we wouldn't have to worry about this case, but
        // finish() is retryable in the face of I/O errors, so we can't consume here.
        if self.delegate.is_none() {
            panic!("Encoder has already had finish() called")
        };

        self.write_final_leftovers()?;

        let writer = self.delegate.take().expect("Writer must be present");

        Ok(writer)
    }

    /// Write any remaining buffered data to the delegate writer.
    ///
    /// Also invoked from `Drop`, so it must tolerate being called after a successful `finish()`.
    fn write_final_leftovers(&mut self) -> Result<()> {
        if self.delegate.is_none() {
            // finish() has already successfully called this, and we are now in drop() with a None
            // writer, so just no-op
            return Ok(());
        }

        self.write_all_encoded_output()?;

        if self.extra_input_occupied_len > 0 {
            // encode the trailing partial triple; this is where any `=` padding is produced
            let encoded_len = encode_config_slice(
                &self.extra_input[..self.extra_input_occupied_len],
                self.config,
                &mut self.output[..],
            );

            self.output_occupied_len = encoded_len;

            self.write_all_encoded_output()?;

            // write succeeded, do not write the encoding of extra again if finish() is retried
            self.extra_input_occupied_len = 0;
        }

        Ok(())
    }

    /// Write as much of the encoded output to the delegate writer as it will accept, and store the
    /// leftovers to be attempted at the next write() call. Updates `self.output_occupied_len`.
    ///
    /// # Errors
    ///
    /// Errors from the delegate writer are returned. In the case of an error,
    /// `self.output_occupied_len` will not be updated, as errors from `write` are specified to mean
    /// that no write took place.
    fn write_to_delegate(&mut self, current_output_len: usize) -> Result<()> {
        // set before the delegate call so Drop won't re-enter the writer if it panics here
        self.panicked = true;
        let res = self
            .delegate
            .as_mut()
            .expect("Writer must be present")
            .write(&self.output[..current_output_len]);
        self.panicked = false;

        res.map(|consumed| {
            debug_assert!(consumed <= current_output_len);

            if consumed < current_output_len {
                self.output_occupied_len = current_output_len.checked_sub(consumed).unwrap();
                // If we're blocking on I/O, the minor inefficiency of copying bytes to the
                // start of the buffer is the least of our concerns...
                // Rotate moves more than we need to, but copy_within isn't stabilized yet.
                self.output.rotate_left(consumed);
            } else {
                self.output_occupied_len = 0;
            }
        })
    }

    /// Write all buffered encoded output. If this returns `Ok`, `self.output_occupied_len` is `0`.
    ///
    /// This is basically write_all for the remaining buffered data but without the undesirable
    /// abort-on-`Ok(0)` behavior.
    ///
    /// # Errors
    ///
    /// Any error emitted by the delegate writer abort the write loop and is returned, unless it's
    /// `Interrupted`, in which case the error is ignored and writes will continue.
    fn write_all_encoded_output(&mut self) -> Result<()> {
        // write_to_delegate shrinks output_occupied_len on each successful (partial) write,
        // so this loop terminates once the delegate has accepted everything
        while self.output_occupied_len > 0 {
            let remaining_len = self.output_occupied_len;
            match self.write_to_delegate(remaining_len) {
                // try again on interrupts ala write_all
                Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
                // other errors return
                Err(e) => return Err(e),
                // success no-ops because remaining length is already updated
                Ok(_) => {}
            };
        }

        debug_assert_eq!(0, self.output_occupied_len);
        Ok(())
    }
}
+
impl<W: Write> Write for EncoderWriter<W> {
    /// Encode input and then write to the delegate writer.
    ///
    /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
    /// of `input` consumed. The value may be `0`, which interacts poorly with `write_all`, which
    /// interprets `Ok(0)` as an error, despite it being allowed by the contract of `write`. See
    /// https://github.com/rust-lang/rust/issues/56889 for more on that.
    ///
    /// If the previous call to `write` provided more (encoded) data than the delegate writer could
    /// accept in a single call to its `write`, the remaining data is buffered. As long as buffered
    /// data is present, subsequent calls to `write` will try to write the remaining buffered data
    /// to the delegate and return either `Ok(0)` -- and therefore not consume any of `input` -- or
    /// an error.
    ///
    /// # Errors
    ///
    /// Any errors emitted by the delegate writer are returned.
    fn write(&mut self, input: &[u8]) -> Result<usize> {
        if self.delegate.is_none() {
            panic!("Cannot write more after calling finish()");
        }

        if input.is_empty() {
            return Ok(0);
        }

        // The contract of `Write::write` places some constraints on this implementation:
        // - a call to `write()` represents at most one call to a wrapped `Write`, so we can't
        // iterate over the input and encode multiple chunks.
        // - Errors mean that "no bytes were written to this writer", so we need to reset the
        // internal state to what it was before the error occurred

        // before reading any input, write any leftover encoded output from last time
        if self.output_occupied_len > 0 {
            let current_len = self.output_occupied_len;
            return self
                .write_to_delegate(current_len)
                // did not read any input
                .map(|_| 0);
        }

        debug_assert_eq!(0, self.output_occupied_len);

        // how many bytes, if any, were read into `extra` to create a triple to encode
        let mut extra_input_read_len = 0;
        let mut input = input;

        // remembered so the error path below can roll back to the pre-call state
        let orig_extra_len = self.extra_input_occupied_len;

        let mut encoded_size = 0;
        // always a multiple of MIN_ENCODE_CHUNK_SIZE
        let mut max_input_len = MAX_INPUT_LEN;

        // process leftover un-encoded input from last write
        if self.extra_input_occupied_len > 0 {
            debug_assert!(self.extra_input_occupied_len < 3);
            if input.len() + self.extra_input_occupied_len >= MIN_ENCODE_CHUNK_SIZE {
                // Fill up `extra`, encode that into `output`, and consume as much of the rest of
                // `input` as possible.
                // We could write just the encoding of `extra` by itself but then we'd have to
                // return after writing only 4 bytes, which is inefficient if the underlying writer
                // would make a syscall.
                extra_input_read_len = MIN_ENCODE_CHUNK_SIZE - self.extra_input_occupied_len;
                debug_assert!(extra_input_read_len > 0);
                // overwrite only bytes that weren't already used. If we need to rollback extra_len
                // (when the subsequent write errors), the old leading bytes will still be there.
                self.extra_input[self.extra_input_occupied_len..MIN_ENCODE_CHUNK_SIZE]
                    .copy_from_slice(&input[0..extra_input_read_len]);

                let len = encode_to_slice(
                    &self.extra_input[0..MIN_ENCODE_CHUNK_SIZE],
                    &mut self.output[..],
                    self.config.char_set.encode_table(),
                );
                debug_assert_eq!(4, len);

                input = &input[extra_input_read_len..];

                // consider extra to be used up, since we encoded it
                self.extra_input_occupied_len = 0;
                // don't clobber where we just encoded to
                encoded_size = 4;
                // and don't read more than can be encoded
                max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE;

            // fall through to normal encoding
            } else {
                // `extra` and `input` are non empty, but `|extra| + |input| < 3`, so there must be
                // 1 byte in each.
                debug_assert_eq!(1, input.len());
                debug_assert_eq!(1, self.extra_input_occupied_len);

                self.extra_input[self.extra_input_occupied_len] = input[0];
                self.extra_input_occupied_len += 1;
                return Ok(1);
            };
        } else if input.len() < MIN_ENCODE_CHUNK_SIZE {
            // `extra` is empty, and `input` fits inside it
            self.extra_input[0..input.len()].copy_from_slice(input);
            self.extra_input_occupied_len = input.len();
            return Ok(input.len());
        };

        // either 0 or 1 complete chunks encoded from extra
        debug_assert!(encoded_size == 0 || encoded_size == 4);
        debug_assert!(
            // didn't encode extra input
            MAX_INPUT_LEN == max_input_len
                // encoded one triple
                || MAX_INPUT_LEN == max_input_len + MIN_ENCODE_CHUNK_SIZE
        );

        // encode complete triples only
        let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE);
        let input_chunks_to_encode_len = cmp::min(input_complete_chunks_len, max_input_len);
        debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE);
        debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE);

        encoded_size += encode_to_slice(
            &input[..(input_chunks_to_encode_len)],
            &mut self.output[encoded_size..],
            self.config.char_set.encode_table(),
        );

        // not updating `self.output_occupied_len` here because if the below write fails, it should
        // "never take place" -- the buffer contents we encoded are ignored and perhaps retried
        // later, if the consumer chooses.

        self.write_to_delegate(encoded_size)
            // no matter whether we wrote the full encoded buffer or not, we consumed the same
            // input
            .map(|_| extra_input_read_len + input_chunks_to_encode_len)
            .map_err(|e| {
                // in case we filled and encoded `extra`, reset extra_len
                self.extra_input_occupied_len = orig_extra_len;

                e
            })
    }

    /// Because this is usually treated as OK to call multiple times, it will *not* flush any
    /// incomplete chunks of input or write padding.
    /// # Errors
    ///
    /// The first error that is not of [`ErrorKind::Interrupted`] will be returned.
    fn flush(&mut self) -> Result<()> {
        self.write_all_encoded_output()?;
        self.delegate
            .as_mut()
            .expect("Writer must be present")
            .flush()
    }
}
+
+impl<W: Write> Drop for EncoderWriter<W> {
+    fn drop(&mut self) {
+        if !self.panicked {
+            // like `BufWriter`, ignore errors during drop
+            let _ = self.write_final_leftovers();
+        }
+    }
+}

+ 176 - 0
zeroidc/vendor/base64-0.13.0/src/write/encoder_string_writer.rs

@@ -0,0 +1,176 @@
+use super::encoder::EncoderWriter;
+use crate::Config;
+use std::io;
+use std::io::Write;
+
/// A `Write` implementation that base64-encodes data using the provided config and accumulates the
/// resulting base64 in memory, which is then exposed as a String via `into_inner()`.
///
/// # Examples
///
/// Buffer base64 in a new String:
///
/// ```
/// use std::io::Write;
///
/// let mut enc = base64::write::EncoderStringWriter::new(base64::STANDARD);
///
/// enc.write_all(b"asdf").unwrap();
///
/// // get the resulting String
/// let b64_string = enc.into_inner();
///
/// assert_eq!("YXNkZg==", &b64_string);
/// ```
///
/// Or, append to an existing String:
///
/// ```
/// use std::io::Write;
///
/// let mut buf = String::from("base64: ");
///
/// let mut enc = base64::write::EncoderStringWriter::from(&mut buf, base64::STANDARD);
///
/// enc.write_all(b"asdf").unwrap();
///
/// // release the &mut reference on buf
/// let _ = enc.into_inner();
///
/// assert_eq!("base64: YXNkZg==", &buf);
/// ```
///
/// # Panics
///
/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without
/// error is invalid and will panic.
///
/// # Performance
///
/// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain
/// bytes to a `io::Write`.
pub struct EncoderStringWriter<S: StrConsumer> {
    /// Byte-oriented encoder whose delegate forwards UTF-8-validated output into `S`.
    encoder: EncoderWriter<Utf8SingleCodeUnitWriter<S>>,
}
+
+impl<S: StrConsumer> EncoderStringWriter<S> {
+    /// Create a EncoderStringWriter that will append to the provided `StrConsumer`.
+    pub fn from(str_consumer: S, config: Config) -> Self {
+        EncoderStringWriter {
+            encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, config),
+        }
+    }
+
+    /// Encode all remaining buffered data, including any trailing incomplete input triples and
+    /// associated padding.
+    ///
+    /// Once this succeeds, no further writes or calls to this method are allowed.
+    ///
+    /// Returns the base64-encoded form of the accumulated written data.
+    pub fn into_inner(mut self) -> S {
+        self.encoder
+            .finish()
+            .expect("Writing to a Vec<u8> should never fail")
+            .str_consumer
+    }
+}
+
+impl EncoderStringWriter<String> {
+    /// Create a EncoderStringWriter that will encode into a new String with the provided config.
+    pub fn new(config: Config) -> Self {
+        EncoderStringWriter::from(String::new(), config)
+    }
+}
+
+impl<S: StrConsumer> Write for EncoderStringWriter<S> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.encoder.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.encoder.flush()
+    }
+}
+
/// An abstraction around consuming `str`s produced by base64 encoding.
pub trait StrConsumer {
    /// Consume the base64 encoded data in `buf`
    fn consume(&mut self, buf: &str);
}

/// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`.
impl<S: StrConsumer + ?Sized> StrConsumer for &mut S {
    fn consume(&mut self, buf: &str) {
        // forward through the mutable reference to the underlying consumer
        (**self).consume(buf)
    }
}

/// Pushes the str onto the end of the String
impl StrConsumer for String {
    fn consume(&mut self, buf: &str) {
        self.push_str(buf)
    }
}
+
/// A `Write` that only can handle bytes that are valid single-byte UTF-8 code units.
///
/// This is safe because we only use it when writing base64, which is always valid UTF-8.
struct Utf8SingleCodeUnitWriter<S: StrConsumer> {
    // receives every validated str slice produced by write()
    str_consumer: S,
}
+
+impl<S: StrConsumer> io::Write for Utf8SingleCodeUnitWriter<S> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        // Because we expect all input to be valid utf-8 individual bytes, we can encode any buffer
+        // length
+        let s = std::str::from_utf8(buf).expect("Input must be valid UTF-8");
+
+        self.str_consumer.consume(s);
+
+        Ok(buf.len())
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        // no op
+        Ok(())
+    }
+}
+
#[cfg(test)]
mod tests {
    use crate::encode_config_buf;
    use crate::tests::random_config;
    use crate::write::encoder_string_writer::EncoderStringWriter;
    use rand::Rng;
    use std::io::Write;

    // For every split point of a random input, streaming the two halves through the writer must
    // produce the same base64 as a one-shot encode. Note: generates `size` random bytes per
    // iteration over `size` split points, so this does O(size^2) work.
    #[test]
    fn every_possible_split_of_input() {
        let mut rng = rand::thread_rng();
        let mut orig_data = Vec::<u8>::new();
        let mut normal_encoded = String::new();

        let size = 5_000;

        for i in 0..size {
            orig_data.clear();
            normal_encoded.clear();

            for _ in 0..size {
                orig_data.push(rng.gen());
            }

            let config = random_config(&mut rng);
            encode_config_buf(&orig_data, config, &mut normal_encoded);

            let mut stream_encoder = EncoderStringWriter::new(config);
            // Write the first i bytes, then the rest
            stream_encoder.write_all(&orig_data[0..i]).unwrap();
            stream_encoder.write_all(&orig_data[i..]).unwrap();

            let stream_encoded = stream_encoder.into_inner();

            assert_eq!(normal_encoded, stream_encoded);
        }
    }
}

+ 568 - 0
zeroidc/vendor/base64-0.13.0/src/write/encoder_tests.rs

@@ -0,0 +1,568 @@
+use super::EncoderWriter;
+use crate::tests::random_config;
+use crate::{encode_config, encode_config_buf, STANDARD_NO_PAD, URL_SAFE};
+
+use std::io::{Cursor, Write};
+use std::{cmp, io, str};
+
+use rand::Rng;
+
#[test]
fn encode_three_bytes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        // one full triple is consumed and encoded immediately
        assert_eq!(3, encoder.write(b"abc").unwrap());
    }
    assert_eq!(encode_config("abc", URL_SAFE).as_bytes(), &cursor.get_ref()[..]);
}

#[test]
fn encode_nine_bytes_two_writes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        // both writes are whole triples, so each is fully consumed
        assert_eq!(6, encoder.write(b"abcdef").unwrap());
        assert_eq!(3, encoder.write(b"ghi").unwrap());
    }
    assert_eq!(
        encode_config("abcdefghi", URL_SAFE).as_bytes(),
        &cursor.get_ref()[..]
    );
}

#[test]
fn encode_one_then_two_bytes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        // 1 byte buffered, then 2 more complete the triple
        assert_eq!(1, encoder.write(b"a").unwrap());
        assert_eq!(2, encoder.write(b"bc").unwrap());
    }
    assert_eq!(encode_config("abc", URL_SAFE).as_bytes(), &cursor.get_ref()[..]);
}

#[test]
fn encode_one_then_five_bytes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        assert_eq!(1, encoder.write(b"a").unwrap());
        assert_eq!(5, encoder.write(b"bcdef").unwrap());
    }
    assert_eq!(
        encode_config("abcdef", URL_SAFE).as_bytes(),
        &cursor.get_ref()[..]
    );
}

#[test]
fn encode_1_2_3_bytes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        assert_eq!(1, encoder.write(b"a").unwrap());
        assert_eq!(2, encoder.write(b"bc").unwrap());
        assert_eq!(3, encoder.write(b"def").unwrap());
    }
    assert_eq!(
        encode_config("abcdef", URL_SAFE).as_bytes(),
        &cursor.get_ref()[..]
    );
}
+
#[test]
fn encode_with_padding() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        encoder.write_all(b"abcd").unwrap();
        encoder.flush().unwrap();
        // padding itself is only emitted by finish()/Drop
    }
    assert_eq!(encode_config("abcd", URL_SAFE).as_bytes(), &cursor.get_ref()[..]);
}

#[test]
fn encode_with_padding_multiple_writes() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        assert_eq!(1, encoder.write(b"a").unwrap());
        assert_eq!(2, encoder.write(b"bc").unwrap());
        assert_eq!(3, encoder.write(b"def").unwrap());
        assert_eq!(1, encoder.write(b"g").unwrap());
        encoder.flush().unwrap();
    }
    assert_eq!(
        encode_config("abcdefg", URL_SAFE).as_bytes(),
        &cursor.get_ref()[..]
    );
}

#[test]
fn finish_writes_extra_byte() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, URL_SAFE);
        assert_eq!(6, encoder.write(b"abcdef").unwrap());

        // will be in extra
        assert_eq!(1, encoder.write(b"g").unwrap());

        // 1 trailing byte = 2 encoded chars
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abcdefg", URL_SAFE).as_bytes(),
        &cursor.get_ref()[..]
    );
}
+
#[test]
fn write_partial_chunk_encodes_partial_chunk() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        // nothing encoded yet; the 2 bytes sit in `extra`
        assert_eq!(2, encoder.write(b"ab").unwrap());
        // encoded here
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("ab", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(3, cursor.get_ref().len());
}

#[test]
fn write_1_chunk_encodes_complete_chunk() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(3, encoder.write(b"abc").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abc", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(4, cursor.get_ref().len());
}

#[test]
fn write_1_chunk_and_partial_encodes_only_complete_chunk() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        // "d" not written
        assert_eq!(3, encoder.write(b"abcd").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abc", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(4, cursor.get_ref().len());
}

#[test]
fn write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(1, encoder.write(b"a").unwrap());
        assert_eq!(2, encoder.write(b"bc").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abc", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(4, cursor.get_ref().len());
}

#[test]
fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining(
) {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(1, encoder.write(b"a").unwrap());
        // doesn't consume "d"
        assert_eq!(2, encoder.write(b"bcd").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abc", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(4, cursor.get_ref().len());
}
+
#[test]
fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(1, encoder.write(b"a").unwrap());
        // completes partial chunk, and another chunk
        assert_eq!(5, encoder.write(b"bcdef").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abcdef", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(8, cursor.get_ref().len());
}

#[test]
fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks(
) {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(1, encoder.write(b"a").unwrap());
        // completes partial chunk, and another chunk, with one more partial chunk that's not
        // consumed
        assert_eq!(5, encoder.write(b"bcdefe").unwrap());
        let _ = encoder.finish().unwrap();
    }
    assert_eq!(
        encode_config("abcdef", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(8, cursor.get_ref().len());
}

#[test]
fn drop_calls_finish_for_you() {
    let mut cursor = Cursor::new(Vec::new());
    {
        let mut encoder = EncoderWriter::new(&mut cursor, STANDARD_NO_PAD);
        assert_eq!(1, encoder.write(b"a").unwrap());
        // encoder dropped here; Drop performs the final encode
    }
    assert_eq!(
        encode_config("a", STANDARD_NO_PAD).as_bytes(),
        &cursor.get_ref()[..]
    );
    assert_eq!(2, cursor.get_ref().len());
}
+
+#[test]
+fn every_possible_split_of_input() {
+    let mut rng = rand::thread_rng();
+    let mut orig_data = Vec::<u8>::new();
+    let mut stream_encoded = Vec::<u8>::new();
+    let mut normal_encoded = String::new();
+
+    let size = 5_000;
+
+    for i in 0..size {
+        orig_data.clear();
+        stream_encoded.clear();
+        normal_encoded.clear();
+
+        for _ in 0..size {
+            orig_data.push(rng.gen());
+        }
+
+        let config = random_config(&mut rng);
+        encode_config_buf(&orig_data, config, &mut normal_encoded);
+
+        {
+            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
+            // Write the first i bytes, then the rest
+            stream_encoder.write_all(&orig_data[0..i]).unwrap();
+            stream_encoder.write_all(&orig_data[i..]).unwrap();
+        }
+
+        assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
+    }
+}
+
+#[test]
+fn encode_random_config_matches_normal_encode_reasonable_input_len() {
+    // choose up to 2 * buf size, so ~half the time it'll use a full buffer
+    do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2)
+}
+
+#[test]
+fn encode_random_config_matches_normal_encode_tiny_input_len() {
+    do_encode_random_config_matches_normal_encode(10)
+}
+
+#[test]
+fn retrying_writes_that_error_with_interrupted_works() {
+    let mut rng = rand::thread_rng();
+    let mut orig_data = Vec::<u8>::new();
+    let mut stream_encoded = Vec::<u8>::new();
+    let mut normal_encoded = String::new();
+
+    for _ in 0..1_000 {
+        orig_data.clear();
+        stream_encoded.clear();
+        normal_encoded.clear();
+
+        let orig_len: usize = rng.gen_range(100, 20_000);
+        for _ in 0..orig_len {
+            orig_data.push(rng.gen());
+        }
+
+        // encode the normal way
+        let config = random_config(&mut rng);
+        encode_config_buf(&orig_data, config, &mut normal_encoded);
+
+        // encode via the stream encoder
+        {
+            let mut interrupt_rng = rand::thread_rng();
+            let mut interrupting_writer = InterruptingWriter {
+                w: &mut stream_encoded,
+                rng: &mut interrupt_rng,
+                fraction: 0.8,
+            };
+
+            let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, config);
+            let mut bytes_consumed = 0;
+            while bytes_consumed < orig_len {
+                // use short inputs since we want to use `extra` a lot as that's what needs rollback
+                // when errors occur
+                let input_len: usize = cmp::min(rng.gen_range(0, 10), orig_len - bytes_consumed);
+
+                retry_interrupted_write_all(
+                    &mut stream_encoder,
+                    &orig_data[bytes_consumed..bytes_consumed + input_len],
+                )
+                .unwrap();
+
+                bytes_consumed += input_len;
+            }
+
+            loop {
+                let res = stream_encoder.finish();
+                match res {
+                    Ok(_) => break,
+                    Err(e) => match e.kind() {
+                        io::ErrorKind::Interrupted => continue,
+                        _ => Err(e).unwrap(), // bail
+                    },
+                }
+            }
+
+            assert_eq!(orig_len, bytes_consumed);
+        }
+
+        assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
+    }
+}
+
+#[test]
+fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_encoded_data() {
+    let mut rng = rand::thread_rng();
+    let mut orig_data = Vec::<u8>::new();
+    let mut stream_encoded = Vec::<u8>::new();
+    let mut normal_encoded = String::new();
+
+    for _ in 0..1_000 {
+        orig_data.clear();
+        stream_encoded.clear();
+        normal_encoded.clear();
+
+        let orig_len: usize = rng.gen_range(100, 20_000);
+        for _ in 0..orig_len {
+            orig_data.push(rng.gen());
+        }
+
+        // encode the normal way
+        let config = random_config(&mut rng);
+        encode_config_buf(&orig_data, config, &mut normal_encoded);
+
+        // encode via the stream encoder
+        {
+            let mut partial_rng = rand::thread_rng();
+            let mut partial_writer = PartialInterruptingWriter {
+                w: &mut stream_encoded,
+                rng: &mut partial_rng,
+                full_input_fraction: 0.1,
+                no_interrupt_fraction: 0.1,
+            };
+
+            let mut stream_encoder = EncoderWriter::new(&mut partial_writer, config);
+            let mut bytes_consumed = 0;
+            while bytes_consumed < orig_len {
+                // use at most medium-length inputs to exercise retry logic more aggressively
+                let input_len: usize = cmp::min(rng.gen_range(0, 100), orig_len - bytes_consumed);
+
+                let res =
+                    stream_encoder.write(&orig_data[bytes_consumed..bytes_consumed + input_len]);
+
+                // retry on interrupt
+                match res {
+                    Ok(len) => bytes_consumed += len,
+                    Err(e) => match e.kind() {
+                        io::ErrorKind::Interrupted => continue,
+                        _ => {
+                            panic!("should not see other errors");
+                        }
+                    },
+                }
+            }
+
+            let _ = stream_encoder.finish().unwrap();
+
+            assert_eq!(orig_len, bytes_consumed);
+        }
+
+        assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
+    }
+}
+
+/// Retry writes until all the data is written or an error that isn't Interrupted is returned.
+fn retry_interrupted_write_all<W: Write>(w: &mut W, buf: &[u8]) -> io::Result<()> {
+    let mut bytes_consumed = 0;
+
+    while bytes_consumed < buf.len() {
+        let res = w.write(&buf[bytes_consumed..]);
+
+        match res {
+            Ok(len) => bytes_consumed += len,
+            Err(e) => match e.kind() {
+                io::ErrorKind::Interrupted => continue,
+                _ => return Err(e),
+            },
+        }
+    }
+
+    Ok(())
+}
+
+fn do_encode_random_config_matches_normal_encode(max_input_len: usize) {
+    let mut rng = rand::thread_rng();
+    let mut orig_data = Vec::<u8>::new();
+    let mut stream_encoded = Vec::<u8>::new();
+    let mut normal_encoded = String::new();
+
+    for _ in 0..1_000 {
+        orig_data.clear();
+        stream_encoded.clear();
+        normal_encoded.clear();
+
+        let orig_len: usize = rng.gen_range(100, 20_000);
+        for _ in 0..orig_len {
+            orig_data.push(rng.gen());
+        }
+
+        // encode the normal way
+        let config = random_config(&mut rng);
+        encode_config_buf(&orig_data, config, &mut normal_encoded);
+
+        // encode via the stream encoder
+        {
+            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
+            let mut bytes_consumed = 0;
+            while bytes_consumed < orig_len {
+                let input_len: usize =
+                    cmp::min(rng.gen_range(0, max_input_len), orig_len - bytes_consumed);
+
+                // write a little bit of the data
+                stream_encoder
+                    .write_all(&orig_data[bytes_consumed..bytes_consumed + input_len])
+                    .unwrap();
+
+                bytes_consumed += input_len;
+            }
+
+            let _ = stream_encoder.finish().unwrap();
+
+            assert_eq!(orig_len, bytes_consumed);
+        }
+
+        assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
+    }
+}
+
+/// A `Write` implementation that returns Interrupted some fraction of the time, randomly.
+struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
+    w: &'a mut W,
+    rng: &'a mut R,
+    /// In [0, 1]. If a random number in [0, 1] is  `<= threshold`, `Write` methods will return
+    /// an `Interrupted` error
+    fraction: f64,
+}
+
+impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        if self.rng.gen_range(0.0, 1.0) <= self.fraction {
+            return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
+        }
+
+        self.w.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        if self.rng.gen_range(0.0, 1.0) <= self.fraction {
+            return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
+        }
+
+        self.w.flush()
+    }
+}
+
+/// A `Write` implementation that sometimes will only write part of its input.
+struct PartialInterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
+    w: &'a mut W,
+    rng: &'a mut R,
+    /// In [0, 1]. If a random number in [0, 1] is  `<= threshold`, `write()` will write all its
+    /// input. Otherwise, it will write a random substring
+    full_input_fraction: f64,
+    no_interrupt_fraction: f64,
+}
+
+impl<'a, W: Write, R: Rng> Write for PartialInterruptingWriter<'a, W, R> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        if self.rng.gen_range(0.0, 1.0) > self.no_interrupt_fraction {
+            return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
+        }
+
+        if self.rng.gen_range(0.0, 1.0) <= self.full_input_fraction || buf.len() == 0 {
+            // pass through the buf untouched
+            self.w.write(buf)
+        } else {
+            // only use a prefix of it
+            self.w
+                .write(&buf[0..(self.rng.gen_range(0, buf.len() - 1))])
+        }
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.w.flush()
+    }
+}

+ 8 - 0
zeroidc/vendor/base64-0.13.0/src/write/mod.rs

@@ -0,0 +1,8 @@
+//! Implementations of `io::Write` to transparently handle base64.
+mod encoder;
+mod encoder_string_writer;
+pub use self::encoder::EncoderWriter;
+pub use self::encoder_string_writer::EncoderStringWriter;
+
+#[cfg(test)]
+mod encoder_tests;

+ 0 - 0
zeroidc/vendor/base64/tests/decode.rs → zeroidc/vendor/base64-0.13.0/tests/decode.rs


+ 105 - 0
zeroidc/vendor/base64-0.13.0/tests/encode.rs

@@ -0,0 +1,105 @@
+extern crate base64;
+
+use base64::*;
+
+fn compare_encode(expected: &str, target: &[u8]) {
+    assert_eq!(expected, encode(target));
+}
+
+#[test]
+fn encode_rfc4648_0() {
+    compare_encode("", b"");
+}
+
+#[test]
+fn encode_rfc4648_1() {
+    compare_encode("Zg==", b"f");
+}
+
+#[test]
+fn encode_rfc4648_2() {
+    compare_encode("Zm8=", b"fo");
+}
+
+#[test]
+fn encode_rfc4648_3() {
+    compare_encode("Zm9v", b"foo");
+}
+
+#[test]
+fn encode_rfc4648_4() {
+    compare_encode("Zm9vYg==", b"foob");
+}
+
+#[test]
+fn encode_rfc4648_5() {
+    compare_encode("Zm9vYmE=", b"fooba");
+}
+
+#[test]
+fn encode_rfc4648_6() {
+    compare_encode("Zm9vYmFy", b"foobar");
+}
+
+#[test]
+fn encode_all_ascii() {
+    let mut ascii = Vec::<u8>::with_capacity(128);
+
+    for i in 0..128 {
+        ascii.push(i);
+    }
+
+    compare_encode(
+        "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\
+         D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8\
+         =",
+        &ascii,
+    );
+}
+
+#[test]
+fn encode_all_bytes() {
+    let mut bytes = Vec::<u8>::with_capacity(256);
+
+    for i in 0..255 {
+        bytes.push(i);
+    }
+    bytes.push(255); //bug with "overflowing" ranges?
+
+    compare_encode(
+        "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\
+         D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\
+         +AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6\
+         /wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==",
+        &bytes,
+    );
+}
+
+#[test]
+fn encode_all_bytes_url() {
+    let mut bytes = Vec::<u8>::with_capacity(256);
+
+    for i in 0..255 {
+        bytes.push(i);
+    }
+    bytes.push(255); //bug with "overflowing" ranges?
+
+    assert_eq!(
+        "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0\
+         -P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\
+         -AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq\
+         -wsbKztLW2t7i5uru8vb6_wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t_g4eLj5OXm5-jp6uvs7e7v8PHy\
+         8_T19vf4-fr7_P3-_w==",
+        encode_config(&bytes, URL_SAFE)
+    );
+}
+
+#[test]
+fn encode_url_safe_without_padding() {
+    let encoded = encode_config(b"alice", URL_SAFE_NO_PAD);
+    assert_eq!(&encoded, "YWxpY2U");
+    assert_eq!(
+        String::from_utf8(decode(&encoded).unwrap()).unwrap(),
+        "alice"
+    );
+}

+ 0 - 0
zeroidc/vendor/base64/tests/helpers.rs → zeroidc/vendor/base64-0.13.0/tests/helpers.rs


+ 194 - 0
zeroidc/vendor/base64-0.13.0/tests/tests.rs

@@ -0,0 +1,194 @@
+extern crate base64;
+extern crate rand;
+
+use rand::{FromEntropy, Rng};
+
+use base64::*;
+
+mod helpers;
+use self::helpers::*;
+
+// generate random contents of the specified length and test encode/decode roundtrip
+fn roundtrip_random(
+    byte_buf: &mut Vec<u8>,
+    str_buf: &mut String,
+    config: Config,
+    byte_len: usize,
+    approx_values_per_byte: u8,
+    max_rounds: u64,
+) {
+    // let the short ones be short but don't let it get too crazy large
+    let num_rounds = calculate_number_of_rounds(byte_len, approx_values_per_byte, max_rounds);
+    let mut r = rand::rngs::SmallRng::from_entropy();
+    let mut decode_buf = Vec::new();
+
+    for _ in 0..num_rounds {
+        byte_buf.clear();
+        str_buf.clear();
+        decode_buf.clear();
+        while byte_buf.len() < byte_len {
+            byte_buf.push(r.gen::<u8>());
+        }
+
+        encode_config_buf(&byte_buf, config, str_buf);
+        decode_config_buf(&str_buf, config, &mut decode_buf).unwrap();
+
+        assert_eq!(byte_buf, &decode_buf);
+    }
+}
+
+fn calculate_number_of_rounds(byte_len: usize, approx_values_per_byte: u8, max: u64) -> u64 {
+    // don't overflow
+    let mut prod = approx_values_per_byte as u64;
+
+    for _ in 0..byte_len {
+        if prod > max {
+            return max;
+        }
+
+        prod = prod.saturating_mul(prod);
+    }
+
+    prod
+}
+
+fn no_pad_config() -> Config {
+    Config::new(CharacterSet::Standard, false)
+}
+
+#[test]
+fn roundtrip_random_short_standard() {
+    let mut byte_buf: Vec<u8> = Vec::new();
+    let mut str_buf = String::new();
+
+    for input_len in 0..40 {
+        roundtrip_random(&mut byte_buf, &mut str_buf, STANDARD, input_len, 4, 10000);
+    }
+}
+
+#[test]
+fn roundtrip_random_with_fast_loop_standard() {
+    let mut byte_buf: Vec<u8> = Vec::new();
+    let mut str_buf = String::new();
+
+    for input_len in 40..100 {
+        roundtrip_random(&mut byte_buf, &mut str_buf, STANDARD, input_len, 4, 1000);
+    }
+}
+
+#[test]
+fn roundtrip_random_short_no_padding() {
+    let mut byte_buf: Vec<u8> = Vec::new();
+    let mut str_buf = String::new();
+
+    for input_len in 0..40 {
+        roundtrip_random(
+            &mut byte_buf,
+            &mut str_buf,
+            no_pad_config(),
+            input_len,
+            4,
+            10000,
+        );
+    }
+}
+
+#[test]
+fn roundtrip_random_no_padding() {
+    let mut byte_buf: Vec<u8> = Vec::new();
+    let mut str_buf = String::new();
+
+    for input_len in 40..100 {
+        roundtrip_random(
+            &mut byte_buf,
+            &mut str_buf,
+            no_pad_config(),
+            input_len,
+            4,
+            1000,
+        );
+    }
+}
+
+#[test]
+fn roundtrip_decode_trailing_10_bytes() {
+    // This is a special case because we decode 8 byte blocks of input at a time as much as we can,
+    // ideally unrolled to 32 bytes at a time, in stages 1 and 2. Since we also write a u64's worth
+    // of bytes (8) to the output, we always write 2 garbage bytes that then will be overwritten by
+    // the NEXT block. However, if the next block only contains 2 bytes, it will decode to 1 byte,
+    // and therefore be too short to cover up the trailing 2 garbage bytes. Thus, we have stage 3
+    // to handle that case.
+
+    for num_quads in 0..25 {
+        let mut s: String = std::iter::repeat("ABCD").take(num_quads).collect();
+        s.push_str("EFGHIJKLZg");
+
+        let decoded = decode(&s).unwrap();
+        assert_eq!(num_quads * 3 + 7, decoded.len());
+
+        assert_eq!(s, encode_config(&decoded, STANDARD_NO_PAD));
+    }
+}
+
+#[test]
+fn display_wrapper_matches_normal_encode() {
+    let mut bytes = Vec::<u8>::with_capacity(256);
+
+    for i in 0..255 {
+        bytes.push(i);
+    }
+    bytes.push(255);
+
+    assert_eq!(
+        encode(&bytes),
+        format!(
+            "{}",
+            base64::display::Base64Display::with_config(&bytes, STANDARD)
+        )
+    );
+}
+
+#[test]
+fn because_we_can() {
+    compare_decode("alice", "YWxpY2U=");
+    compare_decode("alice", &encode(b"alice"));
+    compare_decode("alice", &encode(&decode(&encode(b"alice")).unwrap()));
+}
+
+#[test]
+fn encode_config_slice_can_use_inline_buffer() {
+    let mut buf: [u8; 22] = [0; 22];
+    let mut larger_buf: [u8; 24] = [0; 24];
+    let mut input: [u8; 16] = [0; 16];
+
+    let mut rng = rand::rngs::SmallRng::from_entropy();
+    for elt in &mut input {
+        *elt = rng.gen();
+    }
+
+    assert_eq!(22, encode_config_slice(&input, STANDARD_NO_PAD, &mut buf));
+    let decoded = decode_config(&buf, STANDARD_NO_PAD).unwrap();
+
+    assert_eq!(decoded, input);
+
+    // let's try it again with padding
+
+    assert_eq!(24, encode_config_slice(&input, STANDARD, &mut larger_buf));
+    let decoded = decode_config(&buf, STANDARD).unwrap();
+
+    assert_eq!(decoded, input);
+}
+
+#[test]
+#[should_panic(expected = "index 24 out of range for slice of length 22")]
+fn encode_config_slice_panics_when_buffer_too_small() {
+    let mut buf: [u8; 22] = [0; 22];
+    let mut input: [u8; 16] = [0; 16];
+
+    let mut rng = rand::rngs::SmallRng::from_entropy();
+    for elt in &mut input {
+        *elt = rng.gen();
+    }
+
+    encode_config_slice(&input, STANDARD, &mut buf);
+}

文件差异内容过多而无法显示
+ 0 - 0
zeroidc/vendor/base64/.cargo-checksum.json


+ 400 - 415
zeroidc/vendor/base64/Cargo.lock

@@ -1,826 +1,811 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
 [[package]]
 name = "ansi_term"
-version = "0.11.0"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "atty"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
 dependencies = [
- "hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
 ]
 
 [[package]]
 name = "autocfg"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "autocfg"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
 name = "base64"
-version = "0.13.0"
+version = "0.21.0"
 dependencies = [
- "criterion 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "structopt 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "criterion",
+ "rand",
+ "rstest",
+ "rstest_reuse",
+ "structopt",
 ]
 
 [[package]]
 name = "bitflags"
-version = "1.2.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
-name = "bstr"
-version = "0.2.13"
+name = "bumpalo"
+version = "3.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
 
 [[package]]
-name = "bumpalo"
-version = "3.4.0"
+name = "cast"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
-name = "byteorder"
-version = "1.3.4"
+name = "cfg-if"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
 [[package]]
-name = "cast"
-version = "0.2.3"
+name = "ciborium"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
 dependencies = [
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
 ]
 
 [[package]]
-name = "cfg-if"
-version = "0.1.10"
+name = "ciborium-io"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
 
 [[package]]
 name = "clap"
-version = "2.33.1"
+version = "2.34.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
 dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "atty",
+ "bitflags",
+ "strsim",
+ "textwrap 0.11.0",
+ "unicode-width",
+ "vec_map",
 ]
 
 [[package]]
-name = "cloudabi"
-version = "0.0.3"
+name = "clap"
+version = "3.2.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
 dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags",
+ "clap_lex",
+ "indexmap",
+ "textwrap 0.16.0",
 ]
 
 [[package]]
-name = "criterion"
-version = "0.3.2"
+name = "clap_lex"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
 dependencies = [
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "criterion-plot 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "oorandom 11.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "plotters 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_derive 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)",
- "tinytemplate 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "os_str_bytes",
+]
+
+[[package]]
+name = "criterion"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
+dependencies = [
+ "anes",
+ "atty",
+ "cast",
+ "ciborium",
+ "clap 3.2.23",
+ "criterion-plot",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
 ]
 
 [[package]]
 name = "criterion-plot"
-version = "0.4.3"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
 dependencies = [
- "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cast",
+ "itertools",
 ]
 
 [[package]]
-name = "crossbeam-deque"
-version = "0.7.3"
+name = "crossbeam-channel"
+version = "0.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
 dependencies = [
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "crossbeam-utils",
 ]
 
 [[package]]
-name = "crossbeam-epoch"
+name = "crossbeam-deque"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
 ]
 
 [[package]]
-name = "crossbeam-queue"
-version = "0.2.3"
+name = "crossbeam-epoch"
+version = "0.9.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset",
+ "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.7.2"
+version = "0.8.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
 ]
 
 [[package]]
-name = "csv"
-version = "1.1.3"
+name = "either"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bstr 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
 
 [[package]]
-name = "csv-core"
-version = "0.1.10"
+name = "getrandom"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
 dependencies = [
- "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "libc",
+ "wasi",
 ]
 
 [[package]]
-name = "either"
-version = "1.5.3"
+name = "half"
+version = "1.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
 
 [[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
+name = "hashbrown"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 
 [[package]]
 name = "heck"
-version = "0.3.1"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
 dependencies = [
- "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-segmentation",
 ]
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.15"
+version = "0.1.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
 dependencies = [
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
+dependencies = [
+ "autocfg",
+ "hashbrown",
 ]
 
 [[package]]
 name = "itertools"
-version = "0.9.0"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
 dependencies = [
- "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either",
 ]
 
 [[package]]
 name = "itoa"
-version = "0.4.6"
+version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
 
 [[package]]
 name = "js-sys"
-version = "0.3.44"
+version = "0.3.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
 dependencies = [
- "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasm-bindgen",
 ]
 
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
-version = "0.2.74"
+version = "0.2.139"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
 
 [[package]]
 name = "log"
-version = "0.4.11"
+version = "0.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
 ]
 
-[[package]]
-name = "maybe-uninit"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "memchr"
-version = "2.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
 [[package]]
 name = "memoffset"
-version = "0.5.5"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
 ]
 
 [[package]]
 name = "num-traits"
-version = "0.2.12"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
 ]
 
 [[package]]
 name = "num_cpus"
-version = "1.13.0"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
 dependencies = [
- "hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.2.6",
+ "libc",
 ]
 
 [[package]]
-name = "oorandom"
-version = "11.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "plotters"
-version = "0.2.15"
+name = "once_cell"
+version = "1.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "web-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
 
 [[package]]
-name = "proc-macro-error"
-version = "1.0.4"
+name = "oorandom"
+version = "11.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "proc-macro-error-attr 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
 
 [[package]]
-name = "proc-macro-error-attr"
-version = "1.0.4"
+name = "os_str_bytes"
+version = "6.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
 
 [[package]]
-name = "proc-macro2"
-version = "1.0.19"
+name = "plotters"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97"
 dependencies = [
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
 ]
 
 [[package]]
-name = "quote"
-version = "1.0.7"
+name = "plotters-backend"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"
 
 [[package]]
-name = "rand"
-version = "0.6.5"
+name = "plotters-svg"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f"
 dependencies = [
- "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "plotters-backend",
 ]
 
 [[package]]
-name = "rand_chacha"
-version = "0.1.1"
+name = "ppv-lite86"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
-name = "rand_core"
-version = "0.3.1"
+name = "proc-macro-error"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
 dependencies = [
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
 ]
 
 [[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "rand_hc"
-version = "0.1.0"
+name = "proc-macro-error-attr"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "version_check",
 ]
 
 [[package]]
-name = "rand_isaac"
-version = "0.1.1"
+name = "proc-macro2"
+version = "1.0.49"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-ident",
 ]
 
 [[package]]
-name = "rand_jitter"
-version = "0.1.4"
+name = "quote"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
 dependencies = [
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
 ]
 
 [[package]]
-name = "rand_os"
-version = "0.1.3"
+name = "rand"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
 dependencies = [
- "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "rand_chacha",
+ "rand_core",
 ]
 
 [[package]]
-name = "rand_pcg"
-version = "0.1.2"
+name = "rand_chacha"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
 dependencies = [
- "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86",
+ "rand_core",
 ]
 
 [[package]]
-name = "rand_xorshift"
-version = "0.1.1"
+name = "rand_core"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
 ]
 
 [[package]]
 name = "rayon"
-version = "1.3.1"
+version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
 dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either",
+ "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.7.1"
+version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
 dependencies = [
- "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
 ]
 
 [[package]]
-name = "rdrand"
-version = "0.4.0"
+name = "regex"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax",
 ]
 
 [[package]]
-name = "regex"
-version = "1.3.9"
+name = "regex-syntax"
+version = "0.6.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
 
 [[package]]
-name = "regex-automata"
-version = "0.1.9"
+name = "rstest"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d912f35156a3f99a66ee3e11ac2e0b3f34ac85a07e05263d05a7e2c8810d616f"
 dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "proc-macro2",
+ "quote",
+ "rustc_version",
+ "syn",
 ]
 
 [[package]]
-name = "regex-syntax"
-version = "0.6.18"
+name = "rstest_reuse"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b29d3117bce27ea307d1fb7ce12c64ba11b3fd04311a42d32bc5f0072e6e3d4d"
+dependencies = [
+ "quote",
+ "rustc_version",
+ "syn",
+]
 
 [[package]]
 name = "rustc_version"
-version = "0.2.3"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
 dependencies = [
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "semver",
 ]
 
 [[package]]
 name = "ryu"
-version = "1.0.5"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
 
 [[package]]
 name = "same-file"
 version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
 dependencies = [
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
 ]
 
 [[package]]
 name = "scopeguard"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "semver"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "semver-parser"
-version = "0.7.0"
+version = "1.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
 
 [[package]]
 name = "serde"
-version = "1.0.114"
+version = "1.0.152"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+dependencies = [
+ "serde_derive",
+]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.114"
+version = "1.0.152"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
 dependencies = [
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.57"
+version = "1.0.91"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
 dependencies = [
- "itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa",
+ "ryu",
+ "serde",
 ]
 
 [[package]]
 name = "strsim"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
 
 [[package]]
 name = "structopt"
-version = "0.3.17"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
 dependencies = [
- "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "structopt-derive 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.34.0",
+ "lazy_static",
+ "structopt-derive",
 ]
 
 [[package]]
 name = "structopt-derive"
-version = "0.4.10"
+version = "0.4.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
 dependencies = [
- "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-error 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "syn"
-version = "1.0.36"
+version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
 dependencies = [
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
 ]
 
 [[package]]
 name = "textwrap"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 dependencies = [
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width",
 ]
 
+[[package]]
+name = "textwrap"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+
 [[package]]
 name = "tinytemplate"
-version = "1.1.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
 dependencies = [
- "serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde",
+ "serde_json",
 ]
 
 [[package]]
-name = "unicode-segmentation"
-version = "1.6.0"
+name = "unicode-ident"
+version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
 
 [[package]]
-name = "unicode-width"
-version = "0.1.8"
+name = "unicode-segmentation"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a"
 
 [[package]]
-name = "unicode-xid"
-version = "0.2.1"
+name = "unicode-width"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
 
 [[package]]
 name = "vec_map"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
 
 [[package]]
 name = "version_check"
-version = "0.9.2"
+version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "walkdir"
-version = "2.3.1"
+version = "2.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
 dependencies = [
- "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "same-file",
+ "winapi",
+ "winapi-util",
 ]
 
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.67"
+version = "0.2.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-macro 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if",
+ "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.67"
+version = "0.2.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
 dependencies = [
- "bumpalo 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.67"
+version = "0.2.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
 dependencies = [
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-macro-support 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote",
+ "wasm-bindgen-macro-support",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.67"
+version = "0.2.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
 dependencies = [
- "proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-backend 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.67"
+version = "0.2.83"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
 
 [[package]]
 name = "web-sys"
-version = "0.3.44"
+version = "0.3.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f"
 dependencies = [
- "js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
+ "js-sys",
+ "wasm-bindgen",
 ]
 
 [[package]]
 name = "winapi"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
 ]
 
 [[package]]
 name = "winapi-i686-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
 name = "winapi-util"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[metadata]
-"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
-"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
-"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum bstr 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931"
-"checksum bumpalo 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0"
-"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"
-"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum criterion 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "63f696897c88b57f4ffe3c69d8e1a0613c7d0e6c4833363c8560fbde9c47b966"
-"checksum criterion-plot 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d"
-"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
-"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-"checksum crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279"
-"checksum csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
-"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
-"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205"
-"checksum hermit-abi 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
-"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
-"checksum itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
-"checksum js-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)" = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73"
-"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.74 (registry+https://github.com/rust-lang/crates.io-index)" = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10"
-"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
-"checksum memoffset 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f"
-"checksum num-traits 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
-"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
-"checksum oorandom 11.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c"
-"checksum plotters 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb"
-"checksum proc-macro-error 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
-"checksum proc-macro-error-attr 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
-"checksum proc-macro2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12"
-"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
-"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
-"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
-"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
-"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
-"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
-"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
-"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
-"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
-"checksum rayon 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080"
-"checksum rayon-core 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280"
-"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
-"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
-"checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
-"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
-"checksum ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
-"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
-"checksum serde 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)" = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3"
-"checksum serde_derive 1.0.114 (registry+https://github.com/rust-lang/crates.io-index)" = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e"
-"checksum serde_json 1.0.57 (registry+https://github.com/rust-lang/crates.io-index)" = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c"
-"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-"checksum structopt 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc388d94ffabf39b5ed5fadddc40147cb21e605f53db6f8f36a625d27489ac5"
-"checksum structopt-derive 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5e2513111825077552a6751dfad9e11ce0fba07d7276a3943a037d7e93e64c5f"
-"checksum syn 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)" = "4cdb98bcb1f9d81d07b536179c269ea15999b5d14ea958196413869445bb5250"
-"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-"checksum tinytemplate 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f"
-"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0"
-"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
-"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-"checksum version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
-"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d"
-"checksum wasm-bindgen 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c"
-"checksum wasm-bindgen-backend 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0"
-"checksum wasm-bindgen-macro 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2"
-"checksum wasm-bindgen-macro-support 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556"
-"checksum wasm-bindgen-shared 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)" = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092"
-"checksum web-sys 0.3.44 (registry+https://github.com/rust-lang/crates.io-index)" = "dda38f4e5ca63eda02c059d243aa25b5f35ab98451e518c51612cd0f1bd19a47"
-"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
-"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

+ 34 - 13
zeroidc/vendor/base64/Cargo.toml

@@ -3,39 +3,60 @@
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
 #
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
 
 [package]
-edition = "2018"
+edition = "2021"
+rust-version = "1.57.0"
 name = "base64"
-version = "0.13.0"
-authors = ["Alice Maz <[email protected]>", "Marshall Pierce <[email protected]>"]
+version = "0.21.0"
+authors = [
+    "Alice Maz <[email protected]>",
+    "Marshall Pierce <[email protected]>",
+]
 description = "encodes and decodes base64 as bytes or utf8"
 documentation = "https://docs.rs/base64"
 readme = "README.md"
-keywords = ["base64", "utf8", "encode", "decode", "no_std"]
+keywords = [
+    "base64",
+    "utf8",
+    "encode",
+    "decode",
+    "no_std",
+]
 categories = ["encoding"]
-license = "MIT/Apache-2.0"
+license = "MIT OR Apache-2.0"
 repository = "https://github.com/marshallpierce/rust-base64"
+
 [profile.bench]
 debug = true
 
+[profile.test]
+opt-level = 3
+
 [[bench]]
 name = "benchmarks"
 harness = false
+
 [dev-dependencies.criterion]
-version = "=0.3.2"
+version = "0.4.0"
 
 [dev-dependencies.rand]
-version = "0.6.1"
+version = "0.8.5"
+features = ["small_rng"]
+
+[dev-dependencies.rstest]
+version = "0.12.0"
+
+[dev-dependencies.rstest_reuse]
+version = "0.3.0"
 
 [dev-dependencies.structopt]
-version = "0.3"
+version = "0.3.26"
 
 [features]
 alloc = []

+ 83 - 43
zeroidc/vendor/base64/README.md

@@ -1,7 +1,6 @@
-[base64](https://crates.io/crates/base64)
-===
+# [base64](https://crates.io/crates/base64)
 
-[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![Build](https://travis-ci.org/marshallpierce/rust-base64.svg?branch=master)](https://travis-ci.org/marshallpierce/rust-base64) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
+[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![CircleCI](https://circleci.com/gh/marshallpierce/rust-base64/tree/master.svg?style=shield)](https://circleci.com/gh/marshallpierce/rust-base64/tree/master) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
 
 <a href="https://www.jetbrains.com/?from=rust-base64"><img src="/icon_CLion.svg" height="40px"/></a>
 
@@ -9,58 +8,98 @@ Made with CLion. Thanks to JetBrains for supporting open source!
 
 It's base64. What more could anyone want?
 
-This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at multiple levels of abstraction so you can choose the level of convenience vs performance that you want, e.g. `decode_config_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input), whereas `decode_config` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is slower (although still fast enough for almost any purpose) at 2.1 GiB/s.
+This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at
+multiple levels of abstraction so you can choose the level of convenience vs performance that you want,
+e.g. `decode_engine_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input),
+whereas `decode_engine` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is
+slower (although still fast enough for almost any purpose) at 2.1 GiB/s.
 
-Example
----
+See the [docs](https://docs.rs/base64) for all the details.
 
-```rust
-extern crate base64;
+## FAQ
 
-use base64::{encode, decode};
+### I need to decode base64 with whitespace/null bytes/other random things interspersed in it. What should I do?
 
-fn main() {
-    let a = b"hello world";
-    let b = "aGVsbG8gd29ybGQ=";
+Remove non-base64 characters from your input before decoding.
 
-    assert_eq!(encode(a), b);
-    assert_eq!(a, &decode(b).unwrap()[..]);
-}
-```
+If you have a `Vec` of base64, [retain](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.retain) can be used to
+strip out whatever you need removed.
 
-See the [docs](https://docs.rs/base64) for all the details.
+If you have a `Read` (e.g. reading a file or network socket), there are various approaches.
 
-Rust version compatibility
----
+- Use [iter_read](https://crates.io/crates/iter-read) together with `Read`'s `bytes()` to filter out unwanted bytes.
+- Implement `Read` with a `read()` impl that delegates to your actual `Read`, and then drops any bytes you don't want.
 
-The minimum required Rust version is 1.34.0.
+### I need to line-wrap base64, e.g. for MIME/PEM.
 
-Developing
----
+[line-wrap](https://crates.io/crates/line-wrap) does just that.
 
-Benchmarks are in `benches/`. Running them requires nightly rust, but `rustup` makes it easy:
+### I want canonical base64 encoding/decoding.
 
-```bash
-rustup run nightly cargo bench
-```
+First, don't do this. You should no more expect Base64 to be canonical than you should expect compression algorithms to
+produce canonical output across all usage in the wild (hint: they don't).
+However, [people are drawn to their own destruction like moths to a flame](https://eprint.iacr.org/2022/361), so here we
+are.
+
+There are two opportunities for non-canonical encoding (and thus, detection of the same during decoding): the final bits
+of the last encoded token in two or three token suffixes, and the `=` token used to inflate the suffix to a full four
+tokens.
+
+The trailing bits issue is unavoidable: with 6 bits available in each encoded token, 1 input byte takes 2 tokens,
+with the second one having some bits unused. Same for two input bytes: 16 bits, but 3 tokens have 18 bits. Unless we
+decide to stop shipping whole bytes around, we're stuck with those extra bits that a sneaky or buggy encoder might set
+to 1 instead of 0.
+
+The `=` pad bytes, on the other hand, are entirely a self-own by the Base64 standard. They do not affect decoding other
+than to provide an opportunity to say "that padding is incorrect". Exabytes of storage and transfer have no doubt been
+wasted on pointless `=` bytes. Somehow we all seem to be quite comfortable with, say, hex-encoded data just stopping
+when it's done rather than requiring a confirmation that the author of the encoder could count to four. Anyway, there
+are two ways to make pad bytes predictable: require canonical padding to the next multiple of four bytes as per the RFC,
+or, if you control all producers and consumers, save a few bytes by requiring no padding (especially applicable to the
+url-safe alphabet).
+
+All `Engine` implementations must at a minimum support treating non-canonical padding of both types as an error, and
+optionally may allow other behaviors.
 
-Decoding is aided by some pre-calculated tables, which are generated by:
+## Rust version compatibility
+
+The minimum supported Rust version is 1.57.0.
+
+# Contributing
+
+Contributions are very welcome. However, because this library is used widely, and in security-sensitive contexts, all
+PRs will be carefully scrutinized. Beyond that, this sort of low level library simply needs to be 100% correct. Nobody
+wants to chase bugs in encoding of any sort.
+
+All this means that it takes me a fair amount of time to review each PR, so it might take quite a while to carve out the
+free time to give each PR the attention it deserves. I will get to everyone eventually!
+
+## Developing
+
+Benchmarks are in `benches/`. Running them requires nightly rust, but `rustup` makes it easy:
 
 ```bash
-cargo run --example make_tables > src/tables.rs.tmp && mv src/tables.rs.tmp src/tables.rs
+rustup run nightly cargo bench
 ```
 
-no_std
----
+## no_std
 
-This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate the `default-features` to target core instead. In that case you lose out on all the functionality revolving around `std::io`, `std::error::Error` and heap allocations. There is an additional `alloc` feature that you can activate to bring back the support for heap allocations.
+This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate
+the `default-features` to target `core` instead. In that case you lose out on all the functionality revolving
+around `std::io`, `std::error::Error`, and heap allocations. There is an additional `alloc` feature that you can activate
+to bring back the support for heap allocations.
 
-Profiling
----
+## Profiling
 
-On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the benchmarks with `rustup nightly run cargo bench --no-run`.
+On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the
+benchmarks with `rustup nightly run cargo bench --no-run`.
 
-Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual full path with `rustup run nightly cargo bench -v`; it will print out the commands it runs. If you use the exact path that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate).
+Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results
+easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your
+CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual
+full path with `rustup run nightly cargo bench -v`; it will print out the commands it runs. If you use the exact path
+that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want
+to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate).
 
 ```bash
 sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse
@@ -72,7 +111,10 @@ Then analyze the results, again with perf:
 sudo perf annotate -l
 ```
 
-You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that 4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as it seems due to a phenomenon called *skid*. Basically, a consequence of how fancy modern CPUs are is that this sort of instruction profiling is inherently inaccurate, especially in branch-heavy code.
+You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that
+4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as
+it seems due to a phenomenon called *skid*. Basically, a consequence of how fancy modern CPUs are is that this sort of
+instruction profiling is inherently inaccurate, especially in branch-heavy code.
 
 ```text
  lib.rs:322    0.70 :     10698:       mov    %rdi,%rax
@@ -94,11 +136,10 @@ You'll see a bunch of interleaved rust source and assembly like this. The sectio
     0.00 :        106ab:       je     1090e <base64::decode_config_buf::hbf68a45fefa299c1+0x46e>
 ```
 
+## Fuzzing
 
-Fuzzing
----
-
-This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts. To run, use an invocation like these:
+This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts.
+To run, use an invocation like these:
 
 ```bash
 cargo +nightly fuzz run roundtrip
@@ -107,8 +148,7 @@ cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240
 cargo +nightly fuzz run decode_random
 ```
 
-
-License
----
+## License
 
 This project is dual-licensed under MIT and Apache 2.0.
+

+ 127 - 7
zeroidc/vendor/base64/RELEASE-NOTES.md

@@ -1,10 +1,122 @@
+# 0.21.0
+
+(not yet released)
+
+
+## Migration
+
+### Functions
+
+| < 0.20 function         | 0.21 equivalent                                                                     |
+|-------------------------|-------------------------------------------------------------------------------------|
+| `encode()`              | `engine::general_purpose::STANDARD.encode()` or `prelude::BASE64_STANDARD.encode()` |
+| `encode_config()`       | `engine.encode()`                                                                   |
+| `encode_config_buf()`   | `engine.encode_string()`                                                            |
+| `encode_config_slice()` | `engine.encode_slice()`                                                             |
+| `decode()`              | `engine::general_purpose::STANDARD.decode()` or `prelude::BASE64_STANDARD.decode()` |
+| `decode_config()`       | `engine.decode()`                                                                   |
+| `decode_config_buf()`   | `engine.decode_vec()`                                                               |
+| `decode_config_slice()` | `engine.decode_slice()`                                                             |
+
+The short-lived 0.20 functions were the 0.13 functions with `config` replaced with `engine`.
+
+### Padding
+
+If applicable, use the preset engines `engine::STANDARD`, `engine::STANDARD_NO_PAD`, `engine::URL_SAFE`,
+or `engine::URL_SAFE_NO_PAD`.
+The `NO_PAD` ones require that padding is absent when decoding, and the others require that
+canonical padding is present .
+
+If you need the < 0.20 behavior that did not care about padding, or want to recreate < 0.20.0's predefined `Config`s
+precisely, see the following table.
+
+| 0.13.1 Config   | 0.20.0+ alphabet | `encode_padding` | `decode_padding_mode` |
+|-----------------|------------------|------------------|-----------------------|
+| STANDARD        | STANDARD         | true             | Indifferent           |
+| STANDARD_NO_PAD | STANDARD         | false            | Indifferent           |
+| URL_SAFE        | URL_SAFE         | true             | Indifferent           |
+| URL_SAFE_NO_PAD | URL_SAFE         | false            | Indifferent           |
+
+# 0.21.0-rc.1
+
+- Restore the ability to decode into a slice of precisely the correct length with `Engine.decode_slice_unchecked`.
+- Add `Engine` as a `pub use` in `prelude`.
+
+# 0.21.0-beta.2
+
+## Breaking changes
+
+- Re-exports of preconfigured engines in `engine` are removed in favor of `base64::prelude::...` that are better suited to those who wish to `use` the entire path to a name.
+
+# 0.21.0-beta.1
+
+## Breaking changes
+
+- `FastPortable` was only meant to be an interim name, and shouldn't have shipped in 0.20. It is now `GeneralPurpose` to
+  make its intended usage more clear.
+- `GeneralPurpose` and its config are now `pub use`'d in the `engine` module for convenience.
+- Change a few `from()` functions to be `new()`. `from()` causes confusing compiler errors because of confusion
+  with `From::from`, and is a little misleading because some of those invocations are not very cheap as one would
+  usually expect from a `from` call.
+- `encode*` and `decode*` top level functions are now methods on `Engine`.
+- `DEFAULT_ENGINE` was replaced by `engine::general_purpose::STANDARD`
+- Predefined engine consts `engine::general_purpose::{STANDARD, STANDARD_NO_PAD, URL_SAFE, URL_SAFE_NO_PAD}`
+    - These are `pub use`d into `engine` as well
+- The `*_slice` decode/encode functions now return an error instead of panicking when the output slice is too small
+    - As part of this, there isn't now a public way to decode into a slice _exactly_ the size needed for inputs that
+      aren't multiples of 4 tokens. If adding up to 2 bytes to always be a multiple of 3 bytes for the decode buffer is
+      a problem, file an issue.
+
+## Other changes
+
+- `decoded_len_estimate()` is provided to make it easy to size decode buffers correctly.
+
+# 0.20.0
+
+## Breaking changes
+
+- Update MSRV to 1.57.0
+- Decoding can now either ignore padding, require correct padding, or require no padding. The default is to require
+  correct padding.
+    - The `NO_PAD` config now requires that padding be absent when decoding.
+
+## 0.20.0-alpha.1
+
+### Breaking changes
+
+- Extended the `Config` concept into the `Engine` abstraction, allowing the user to pick different encoding / decoding
+  implementations.
+    - What was formerly the only algorithm is now the `FastPortable` engine, so named because it's portable (works on
+      any CPU) and relatively fast.
+    - This opens the door to a portable constant-time
+      implementation ([#153](https://github.com/marshallpierce/rust-base64/pull/153),
+      presumably `ConstantTimePortable`?) for security-sensitive applications that need side-channel resistance, and
+      CPU-specific SIMD implementations for more speed.
+    - Standard base64 per the RFC is available via `DEFAULT_ENGINE`. To use different alphabets or other settings (
+      padding, etc), create your own engine instance.
+- `CharacterSet` is now `Alphabet` (per the RFC), and allows creating custom alphabets. The corresponding tables that
+  were previously code-generated are now built dynamically.
+- Since there are already multiple breaking changes, various functions are renamed to be more consistent and
+  discoverable.
+- MSRV is now 1.47.0 to allow various things to use `const fn`.
+- `DecoderReader` now owns its inner reader, and can expose it via `into_inner()`. For symmetry, `EncoderWriter` can do
+  the same with its writer.
+- `encoded_len` is now public so you can size encode buffers precisely.
+
+# 0.13.1
+
+- More precise decode buffer sizing, avoiding unnecessary allocation in `decode_config`.
+
 # 0.13.0
 
 - Config methods are const
 - Added `EncoderStringWriter` to allow encoding directly to a String
 - `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work)
-    - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value.
-- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be emitted instead of `InvalidLength` to make the problem more obvious.
+    - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which
+      returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to
+      use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value.
+- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be
+  emitted instead of `InvalidLength` to make the problem more obvious.
 
 # 0.12.2
 
@@ -22,23 +134,31 @@
 - A minor performance improvement in encoding
 
 # 0.11.0
+
 - Minimum rust version 1.34.0
 - `no_std` is now supported via the two new features `alloc` and `std`.
 
 # 0.10.1
 
 - Minimum rust version 1.27.2
-- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs on `EncoderWriter::write`.
+- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer
+  didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs
+  on `EncoderWriter::write`.
 - Make it configurable whether or not to return an error when decoding detects excess trailing bits.
 
 # 0.10.0
 
-- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming encoding, etc) either couldn't support it or could support only special cases of it with a great increase in complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's still available if you need it.
-  - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for common
-  configs that `unwrap()` for you are no longer needed
+- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming
+  encoding, etc) either couldn't support it or could support only special cases of it with a great increase in
+  complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's
+  still available if you need it.
+    - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for
+      common
+      configs that `unwrap()` for you are no longer needed
 - Add a streaming encoder `Write` impl to transparently base64 as you write.
 - Remove the remaining `unsafe` code.
-- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b)`.
+- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do
+  yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b)`.
 - Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them.
 
 # 0.9.3

+ 93 - 61
zeroidc/vendor/base64/benches/benchmarks.rs

@@ -1,27 +1,22 @@
-extern crate base64;
 #[macro_use]
 extern crate criterion;
-extern crate rand;
 
-use base64::display;
 use base64::{
-    decode, decode_config_buf, decode_config_slice, encode, encode_config_buf, encode_config_slice,
-    write, Config,
+    display,
+    engine::{general_purpose::STANDARD, Engine},
+    write,
 };
-
-use criterion::{black_box, Bencher, Criterion, ParameterizedBenchmark, Throughput};
-use rand::{FromEntropy, Rng};
+use criterion::{black_box, Bencher, BenchmarkId, Criterion, Throughput};
+use rand::{Rng, SeedableRng};
 use std::io::{self, Read, Write};
 
-const TEST_CONFIG: Config = base64::STANDARD;
-
 fn do_decode_bench(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
     fill(&mut v);
-    let encoded = encode(&v);
+    let encoded = STANDARD.encode(&v);
 
     b.iter(|| {
-        let orig = decode(&encoded);
+        let orig = STANDARD.decode(&encoded);
         black_box(&orig);
     });
 }
@@ -29,11 +24,11 @@ fn do_decode_bench(b: &mut Bencher, &size: &usize) {
 fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
     fill(&mut v);
-    let encoded = encode(&v);
+    let encoded = STANDARD.encode(&v);
 
     let mut buf = Vec::new();
     b.iter(|| {
-        decode_config_buf(&encoded, TEST_CONFIG, &mut buf).unwrap();
+        STANDARD.decode_vec(&encoded, &mut buf).unwrap();
         black_box(&buf);
         buf.clear();
     });
@@ -42,12 +37,12 @@ fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
 fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
     fill(&mut v);
-    let encoded = encode(&v);
+    let encoded = STANDARD.encode(&v);
 
     let mut buf = Vec::new();
     buf.resize(size, 0);
     b.iter(|| {
-        decode_config_slice(&encoded, TEST_CONFIG, &mut buf).unwrap();
+        STANDARD.decode_slice(&encoded, &mut buf).unwrap();
         black_box(&buf);
     });
 }
@@ -55,7 +50,7 @@ fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) {
 fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
     fill(&mut v);
-    let encoded = encode(&v);
+    let encoded = STANDARD.encode(&v);
 
     let mut buf = Vec::new();
     buf.resize(size, 0);
@@ -63,7 +58,7 @@ fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) {
 
     b.iter(|| {
         let mut cursor = io::Cursor::new(&encoded[..]);
-        let mut decoder = base64::read::DecoderReader::new(&mut cursor, TEST_CONFIG);
+        let mut decoder = base64::read::DecoderReader::new(&mut cursor, &STANDARD);
         decoder.read_to_end(&mut buf).unwrap();
         buf.clear();
         black_box(&buf);
@@ -74,7 +69,7 @@ fn do_encode_bench(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size);
     fill(&mut v);
     b.iter(|| {
-        let e = encode(&v);
+        let e = STANDARD.encode(&v);
         black_box(&e);
     });
 }
@@ -83,7 +78,7 @@ fn do_encode_bench_display(b: &mut Bencher, &size: &usize) {
     let mut v: Vec<u8> = Vec::with_capacity(size);
     fill(&mut v);
     b.iter(|| {
-        let e = format!("{}", display::Base64Display::with_config(&v, TEST_CONFIG));
+        let e = format!("{}", display::Base64Display::new(&v, &STANDARD));
         black_box(&e);
     });
 }
@@ -93,7 +88,7 @@ fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
     fill(&mut v);
     let mut buf = String::new();
     b.iter(|| {
-        encode_config_buf(&v, TEST_CONFIG, &mut buf);
+        STANDARD.encode_string(&v, &mut buf);
         buf.clear();
     });
 }
@@ -104,9 +99,7 @@ fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) {
     let mut buf = Vec::new();
     // conservative estimate of encoded size
     buf.resize(v.len() * 2, 0);
-    b.iter(|| {
-        encode_config_slice(&v, TEST_CONFIG, &mut buf);
-    });
+    b.iter(|| STANDARD.encode_slice(&v, &mut buf).unwrap());
 }
 
 fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) {
@@ -117,7 +110,7 @@ fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) {
     buf.reserve(size * 2);
     b.iter(|| {
         buf.clear();
-        let mut stream_enc = write::EncoderWriter::new(&mut buf, TEST_CONFIG);
+        let mut stream_enc = write::EncoderWriter::new(&mut buf, &STANDARD);
         stream_enc.write_all(&v).unwrap();
         stream_enc.flush().unwrap();
     });
@@ -128,7 +121,7 @@ fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) {
     fill(&mut v);
 
     b.iter(|| {
-        let mut stream_enc = write::EncoderStringWriter::new(TEST_CONFIG);
+        let mut stream_enc = write::EncoderStringWriter::new(&STANDARD);
         stream_enc.write_all(&v).unwrap();
         stream_enc.flush().unwrap();
         let _ = stream_enc.into_inner();
@@ -142,7 +135,7 @@ fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) {
     let mut buf = String::new();
     b.iter(|| {
         buf.clear();
-        let mut stream_enc = write::EncoderStringWriter::from(&mut buf, TEST_CONFIG);
+        let mut stream_enc = write::EncoderStringWriter::from_consumer(&mut buf, &STANDARD);
         stream_enc.write_all(&v).unwrap();
         stream_enc.flush().unwrap();
         let _ = stream_enc.into_inner();
@@ -164,46 +157,85 @@ const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024];
 // keep the benchmark runtime reasonable.
 const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024];
 
-fn encode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark<usize> {
-    ParameterizedBenchmark::new("encode", do_encode_bench, byte_sizes.iter().cloned())
+fn encode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
+    let mut group = c.benchmark_group(label);
+    group
         .warm_up_time(std::time::Duration::from_millis(500))
-        .measurement_time(std::time::Duration::from_secs(3))
-        .throughput(|s| Throughput::Bytes(*s as u64))
-        .with_function("encode_display", do_encode_bench_display)
-        .with_function("encode_reuse_buf", do_encode_bench_reuse_buf)
-        .with_function("encode_slice", do_encode_bench_slice)
-        .with_function("encode_reuse_buf_stream", do_encode_bench_stream)
-        .with_function("encode_string_stream", do_encode_bench_string_stream)
-        .with_function(
-            "encode_string_reuse_buf_stream",
-            do_encode_bench_string_reuse_buf_stream,
-        )
-}
+        .measurement_time(std::time::Duration::from_secs(3));
+
+    for size in byte_sizes {
+        group
+            .throughput(Throughput::Bytes(*size as u64))
+            .bench_with_input(BenchmarkId::new("encode", size), size, do_encode_bench)
+            .bench_with_input(
+                BenchmarkId::new("encode_display", size),
+                size,
+                do_encode_bench_display,
+            )
+            .bench_with_input(
+                BenchmarkId::new("encode_reuse_buf", size),
+                size,
+                do_encode_bench_reuse_buf,
+            )
+            .bench_with_input(
+                BenchmarkId::new("encode_slice", size),
+                size,
+                do_encode_bench_slice,
+            )
+            .bench_with_input(
+                BenchmarkId::new("encode_reuse_buf_stream", size),
+                size,
+                do_encode_bench_stream,
+            )
+            .bench_with_input(
+                BenchmarkId::new("encode_string_stream", size),
+                size,
+                do_encode_bench_string_stream,
+            )
+            .bench_with_input(
+                BenchmarkId::new("encode_string_reuse_buf_stream", size),
+                size,
+                do_encode_bench_string_reuse_buf_stream,
+            );
+    }
 
-fn decode_benchmarks(byte_sizes: &[usize]) -> ParameterizedBenchmark<usize> {
-    ParameterizedBenchmark::new("decode", do_decode_bench, byte_sizes.iter().cloned())
-        .warm_up_time(std::time::Duration::from_millis(500))
-        .measurement_time(std::time::Duration::from_secs(3))
-        .throughput(|s| Throughput::Bytes(*s as u64))
-        .with_function("decode_reuse_buf", do_decode_bench_reuse_buf)
-        .with_function("decode_slice", do_decode_bench_slice)
-        .with_function("decode_stream", do_decode_bench_stream)
+    group.finish();
 }
 
-fn bench(c: &mut Criterion) {
-    c.bench("bench_small_input", encode_benchmarks(&BYTE_SIZES[..]));
-
-    c.bench(
-        "bench_large_input",
-        encode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10),
-    );
+fn decode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
+    let mut group = c.benchmark_group(label);
+
+    for size in byte_sizes {
+        group
+            .warm_up_time(std::time::Duration::from_millis(500))
+            .measurement_time(std::time::Duration::from_secs(3))
+            .throughput(Throughput::Bytes(*size as u64))
+            .bench_with_input(BenchmarkId::new("decode", size), size, do_decode_bench)
+            .bench_with_input(
+                BenchmarkId::new("decode_reuse_buf", size),
+                size,
+                do_decode_bench_reuse_buf,
+            )
+            .bench_with_input(
+                BenchmarkId::new("decode_slice", size),
+                size,
+                do_decode_bench_slice,
+            )
+            .bench_with_input(
+                BenchmarkId::new("decode_stream", size),
+                size,
+                do_decode_bench_stream,
+            );
+    }
 
-    c.bench("bench_small_input", decode_benchmarks(&BYTE_SIZES[..]));
+    group.finish();
+}
 
-    c.bench(
-        "bench_large_input",
-        decode_benchmarks(&LARGE_BYTE_SIZES[..]).sample_size(10),
-    );
+fn bench(c: &mut Criterion) {
+    encode_benchmarks(c, "encode_small_input", &BYTE_SIZES[..]);
+    encode_benchmarks(c, "encode_large_input", &LARGE_BYTE_SIZES[..]);
+    decode_benchmarks(c, "decode_small_input", &BYTE_SIZES[..]);
+    decode_benchmarks(c, "decode_large_input", &LARGE_BYTE_SIZES[..]);
 }
 
 criterion_group!(benches, bench);

+ 1 - 0
zeroidc/vendor/base64/clippy.toml

@@ -0,0 +1 @@
+msrv = "1.57.0"

+ 25 - 25
zeroidc/vendor/base64/examples/base64.rs

@@ -4,37 +4,28 @@ use std::path::PathBuf;
 use std::process;
 use std::str::FromStr;
 
-use base64::{read, write};
+use base64::{alphabet, engine, read, write};
 use structopt::StructOpt;
 
 #[derive(Debug, StructOpt)]
-enum CharacterSet {
+enum Alphabet {
     Standard,
     UrlSafe,
 }
 
-impl Default for CharacterSet {
+impl Default for Alphabet {
     fn default() -> Self {
-        CharacterSet::Standard
+        Self::Standard
     }
 }
 
-impl Into<base64::Config> for CharacterSet {
-    fn into(self) -> base64::Config {
-        match self {
-            CharacterSet::Standard => base64::STANDARD,
-            CharacterSet::UrlSafe => base64::URL_SAFE,
-        }
-    }
-}
-
-impl FromStr for CharacterSet {
+impl FromStr for Alphabet {
     type Err = String;
-    fn from_str(s: &str) -> Result<CharacterSet, String> {
+    fn from_str(s: &str) -> Result<Self, String> {
         match s {
-            "standard" => Ok(CharacterSet::Standard),
-            "urlsafe" => Ok(CharacterSet::UrlSafe),
-            _ => Err(format!("charset '{}' unrecognized", s)),
+            "standard" => Ok(Self::Standard),
+            "urlsafe" => Ok(Self::UrlSafe),
+            _ => Err(format!("alphabet '{}' unrecognized", s)),
         }
     }
 }
@@ -45,10 +36,10 @@ struct Opt {
     /// decode data
     #[structopt(short = "d", long = "decode")]
     decode: bool,
-    /// The character set to choose. Defaults to the standard base64 character set.
-    /// Supported character sets include "standard" and "urlsafe".
-    #[structopt(long = "charset")]
-    charset: Option<CharacterSet>,
+    /// The alphabet to choose. Defaults to the standard base64 alphabet.
+    /// Supported alphabets include "standard" and "urlsafe".
+    #[structopt(long = "alphabet")]
+    alphabet: Option<Alphabet>,
     /// The file to encode/decode.
     #[structopt(parse(from_os_str))]
     file: Option<PathBuf>,
@@ -68,14 +59,23 @@ fn main() {
         }
         Some(f) => Box::new(File::open(f).unwrap()),
     };
-    let config = opt.charset.unwrap_or_default().into();
+
+    let alphabet = opt.alphabet.unwrap_or_default();
+    let engine = engine::GeneralPurpose::new(
+        &match alphabet {
+            Alphabet::Standard => alphabet::STANDARD,
+            Alphabet::UrlSafe => alphabet::URL_SAFE,
+        },
+        engine::general_purpose::PAD,
+    );
+
     let stdout = io::stdout();
     let mut stdout = stdout.lock();
     let r = if opt.decode {
-        let mut decoder = read::DecoderReader::new(&mut input, config);
+        let mut decoder = read::DecoderReader::new(&mut input, &engine);
         io::copy(&mut decoder, &mut stdout)
     } else {
-        let mut encoder = write::EncoderWriter::new(&mut stdout, config);
+        let mut encoder = write::EncoderWriter::new(&mut stdout, &engine);
         io::copy(&mut input, &mut encoder)
     };
     if let Err(e) = r {

+ 241 - 0
zeroidc/vendor/base64/src/alphabet.rs

@@ -0,0 +1,241 @@
+//! Provides [Alphabet] and constants for alphabets commonly used in the wild.
+
+use crate::PAD_BYTE;
+use core::fmt;
+#[cfg(any(feature = "std", test))]
+use std::error;
+
+const ALPHABET_SIZE: usize = 64;
+
+/// An alphabet defines the 64 ASCII characters (symbols) used for base64.
+///
+/// Common alphabets are provided as constants, and custom alphabets
+/// can be made via `from_str` or the `TryFrom<str>` implementation.
+///
+/// ```
+/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap();
+///
+/// let engine = base64::engine::GeneralPurpose::new(
+///     &custom,
+///     base64::engine::general_purpose::PAD);
+/// ```
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Alphabet {
+    pub(crate) symbols: [u8; ALPHABET_SIZE],
+}
+
+impl Alphabet {
+    /// Performs no checks so that it can be const.
+    /// Used only for known-valid strings.
+    const fn from_str_unchecked(alphabet: &str) -> Self {
+        let mut symbols = [0_u8; ALPHABET_SIZE];
+        let source_bytes = alphabet.as_bytes();
+
+        // a way to copy that's allowed in const fn
+        let mut index = 0;
+        while index < ALPHABET_SIZE {
+            symbols[index] = source_bytes[index];
+            index += 1;
+        }
+
+        Self { symbols }
+    }
+
+    /// Create an `Alphabet` from a string of 64 unique printable ASCII bytes.
+    ///
+    /// The `=` byte is not allowed as it is used for padding.
+    pub const fn new(alphabet: &str) -> Result<Self, ParseAlphabetError> {
+        let bytes = alphabet.as_bytes();
+        if bytes.len() != ALPHABET_SIZE {
+            return Err(ParseAlphabetError::InvalidLength);
+        }
+
+        {
+            let mut index = 0;
+            while index < ALPHABET_SIZE {
+                let byte = bytes[index];
+
+                // must be ascii printable. 127 (DEL) is commonly considered printable
+                // for some reason but clearly unsuitable for base64.
+                if !(byte >= 32_u8 && byte <= 126_u8) {
+                    return Err(ParseAlphabetError::UnprintableByte(byte));
+                }
+                // = is assumed to be padding, so cannot be used as a symbol
+                if byte == PAD_BYTE {
+                    return Err(ParseAlphabetError::ReservedByte(byte));
+                }
+
+                // Check for duplicates while staying within what const allows.
+                // It's n^2, but only over 64 hot bytes, and only once, so it's likely in the single digit
+                // microsecond range.
+
+                let mut probe_index = 0;
+                while probe_index < ALPHABET_SIZE {
+                    if probe_index == index {
+                        probe_index += 1;
+                        continue;
+                    }
+
+                    let probe_byte = bytes[probe_index];
+
+                    if byte == probe_byte {
+                        return Err(ParseAlphabetError::DuplicatedByte(byte));
+                    }
+
+                    probe_index += 1;
+                }
+
+                index += 1;
+            }
+        }
+
+        Ok(Self::from_str_unchecked(alphabet))
+    }
+}
+
+impl TryFrom<&str> for Alphabet {
+    type Error = ParseAlphabetError;
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        Self::new(value)
+    }
+}
+
+/// Possible errors when constructing an [Alphabet] from a `str`.
+#[derive(Debug, Eq, PartialEq)]
+pub enum ParseAlphabetError {
+    /// Alphabets must be 64 ASCII bytes
+    InvalidLength,
+    /// All bytes must be unique
+    DuplicatedByte(u8),
+    /// All bytes must be printable (in the range `[32, 126]`).
+    UnprintableByte(u8),
+    /// `=` cannot be used
+    ReservedByte(u8),
+}
+
+impl fmt::Display for ParseAlphabetError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::InvalidLength => write!(f, "Invalid length - must be 64 bytes"),
+            Self::DuplicatedByte(b) => write!(f, "Duplicated byte: {:#04x}", b),
+            Self::UnprintableByte(b) => write!(f, "Unprintable byte: {:#04x}", b),
+            Self::ReservedByte(b) => write!(f, "Reserved byte: {:#04x}", b),
+        }
+    }
+}
+
+#[cfg(any(feature = "std", test))]
+impl error::Error for ParseAlphabetError {}
+
+/// The standard alphabet (uses `+` and `/`).
+///
+/// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-3).
+pub const STANDARD: Alphabet = Alphabet::from_str_unchecked(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
+);
+
+/// The URL safe alphabet (uses `-` and `_`).
+///
+/// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-4).
+pub const URL_SAFE: Alphabet = Alphabet::from_str_unchecked(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
+);
+
+/// The `crypt(3)` alphabet (uses `.` and `/` as the first two values).
+///
+/// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses.
+pub const CRYPT: Alphabet = Alphabet::from_str_unchecked(
+    "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
+);
+
+/// The bcrypt alphabet.
+pub const BCRYPT: Alphabet = Alphabet::from_str_unchecked(
+    "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+);
+
+/// The alphabet used in IMAP-modified UTF-7 (uses `+` and `,`).
+///
+/// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3)
+pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,",
+);
+
+/// The alphabet used in BinHex 4.0 files.
+///
+/// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
+pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked(
+    "!\"#$%&'()*+,-0123456789@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdehijklmpqr",
+);
+
+#[cfg(test)]
+mod tests {
+    use crate::alphabet::*;
+    use std::convert::TryFrom as _;
+
+    #[test]
+    fn detects_duplicate_start() {
+        assert_eq!(
+            ParseAlphabetError::DuplicatedByte(b'A'),
+            Alphabet::new("AACDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
+                .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn detects_duplicate_end() {
+        assert_eq!(
+            ParseAlphabetError::DuplicatedByte(b'/'),
+            Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789//")
+                .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn detects_duplicate_middle() {
+        assert_eq!(
+            ParseAlphabetError::DuplicatedByte(b'Z'),
+            Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/")
+                .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn detects_length() {
+        assert_eq!(
+            ParseAlphabetError::InvalidLength,
+            Alphabet::new(
+                "xxxxxxxxxABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/",
+            )
+            .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn detects_padding() {
+        assert_eq!(
+            ParseAlphabetError::ReservedByte(b'='),
+            Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+=")
+                .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn detects_unprintable() {
+        // form feed
+        assert_eq!(
+            ParseAlphabetError::UnprintableByte(0xc),
+            Alphabet::new("\x0cBCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
+                .unwrap_err()
+        );
+    }
+
+    #[test]
+    fn same_as_unchecked() {
+        assert_eq!(
+            STANDARD,
+            Alphabet::try_from("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
+                .unwrap()
+        );
+    }
+}

+ 46 - 62
zeroidc/vendor/base64/src/chunked_encoder.rs

@@ -1,13 +1,12 @@
-use crate::{
-    encode::{add_padding, encode_to_slice},
-    Config,
-};
 #[cfg(any(feature = "alloc", feature = "std", test))]
 use alloc::string::String;
 use core::cmp;
 #[cfg(any(feature = "alloc", feature = "std", test))]
 use core::str;
 
+use crate::encode::add_padding;
+use crate::engine::{Config, Engine};
+
 /// The output mechanism for ChunkedEncoder's encoded bytes.
 pub trait Sink {
     type Error;
@@ -19,23 +18,21 @@ pub trait Sink {
 const BUF_SIZE: usize = 1024;
 
 /// A base64 encoder that emits encoded bytes in chunks without heap allocation.
-pub struct ChunkedEncoder {
-    config: Config,
+pub struct ChunkedEncoder<'e, E: Engine + ?Sized> {
+    engine: &'e E,
     max_input_chunk_len: usize,
 }
 
-impl ChunkedEncoder {
-    pub fn new(config: Config) -> ChunkedEncoder {
+impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> {
+    pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> {
         ChunkedEncoder {
-            config,
-            max_input_chunk_len: max_input_length(BUF_SIZE, config),
+            engine,
+            max_input_chunk_len: max_input_length(BUF_SIZE, engine.config().encode_padding()),
         }
     }
 
     pub fn encode<S: Sink>(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> {
         let mut encode_buf: [u8; BUF_SIZE] = [0; BUF_SIZE];
-        let encode_table = self.config.char_set.encode_table();
-
         let mut input_index = 0;
 
         while input_index < bytes.len() {
@@ -44,12 +41,12 @@ impl ChunkedEncoder {
 
             let chunk = &bytes[input_index..(input_index + input_chunk_len)];
 
-            let mut b64_bytes_written = encode_to_slice(chunk, &mut encode_buf, encode_table);
+            let mut b64_bytes_written = self.engine.internal_encode(chunk, &mut encode_buf);
 
             input_index += input_chunk_len;
             let more_input_left = input_index < bytes.len();
 
-            if self.config.pad && !more_input_left {
+            if self.engine.config().encode_padding() && !more_input_left {
                 // no more input, add padding if needed. Buffer will have room because
                 // max_input_length leaves room for it.
                 b64_bytes_written += add_padding(bytes.len(), &mut encode_buf[b64_bytes_written..]);
@@ -69,8 +66,8 @@ impl ChunkedEncoder {
 ///
 /// The input length will always be a multiple of 3 so that no encoding state has to be carried over
 /// between chunks.
-fn max_input_length(encoded_buf_len: usize, config: Config) -> usize {
-    let effective_buf_len = if config.pad {
+fn max_input_length(encoded_buf_len: usize, padded: bool) -> usize {
+    let effective_buf_len = if padded {
         // make room for padding
         encoded_buf_len
             .checked_sub(2)
@@ -109,26 +106,28 @@ impl<'a> Sink for StringSink<'a> {
 
 #[cfg(test)]
 pub mod tests {
-    use super::*;
-    use crate::{encode_config_buf, tests::random_config, CharacterSet, STANDARD};
-
     use rand::{
         distributions::{Distribution, Uniform},
-        FromEntropy, Rng,
+        Rng, SeedableRng,
     };
 
+    use crate::{
+        alphabet::STANDARD,
+        engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig, PAD},
+        tests::random_engine,
+    };
+
+    use super::*;
+
     #[test]
     fn chunked_encode_empty() {
-        assert_eq!("", chunked_encode_str(&[], STANDARD));
+        assert_eq!("", chunked_encode_str(&[], PAD));
     }
 
     #[test]
     fn chunked_encode_intermediate_fast_loop() {
         // > 8 bytes input, will enter the pretty fast loop
-        assert_eq!(
-            "Zm9vYmFyYmF6cXV4",
-            chunked_encode_str(b"foobarbazqux", STANDARD)
-        );
+        assert_eq!("Zm9vYmFyYmF6cXV4", chunked_encode_str(b"foobarbazqux", PAD));
     }
 
     #[test]
@@ -136,14 +135,14 @@ pub mod tests {
         // > 32 bytes input, will enter the uber fast loop
         assert_eq!(
             "Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==",
-            chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", STANDARD)
+            chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", PAD)
         );
     }
 
     #[test]
     fn chunked_encode_slow_loop_only() {
         // < 8 bytes input, slow loop only
-        assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", STANDARD));
+        assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", PAD));
     }
 
     #[test]
@@ -154,32 +153,27 @@ pub mod tests {
 
     #[test]
     fn max_input_length_no_pad() {
-        let config = config_with_pad(false);
-        assert_eq!(768, max_input_length(1024, config));
+        assert_eq!(768, max_input_length(1024, false));
     }
 
     #[test]
     fn max_input_length_with_pad_decrements_one_triple() {
-        let config = config_with_pad(true);
-        assert_eq!(765, max_input_length(1024, config));
+        assert_eq!(765, max_input_length(1024, true));
     }
 
     #[test]
     fn max_input_length_with_pad_one_byte_short() {
-        let config = config_with_pad(true);
-        assert_eq!(765, max_input_length(1025, config));
+        assert_eq!(765, max_input_length(1025, true));
     }
 
     #[test]
     fn max_input_length_with_pad_fits_exactly() {
-        let config = config_with_pad(true);
-        assert_eq!(768, max_input_length(1026, config));
+        assert_eq!(768, max_input_length(1026, true));
     }
 
     #[test]
     fn max_input_length_cant_use_extra_single_encoded_byte() {
-        let config = Config::new(crate::CharacterSet::Standard, false);
-        assert_eq!(300, max_input_length(401, config));
+        assert_eq!(300, max_input_length(401, false));
     }
 
     pub fn chunked_encode_matches_normal_encode_random<S: SinkTestHelper>(sink_test_helper: &S) {
@@ -197,49 +191,39 @@ pub mod tests {
                 input_buf.push(rng.gen());
             }
 
-            let config = random_config(&mut rng);
+            let engine = random_engine(&mut rng);
 
-            let chunk_encoded_string = sink_test_helper.encode_to_string(config, &input_buf);
-            encode_config_buf(&input_buf, config, &mut output_buf);
+            let chunk_encoded_string = sink_test_helper.encode_to_string(&engine, &input_buf);
+            engine.encode_string(&input_buf, &mut output_buf);
 
-            assert_eq!(
-                output_buf, chunk_encoded_string,
-                "input len={}, config: pad={}",
-                buf_len, config.pad
-            );
+            assert_eq!(output_buf, chunk_encoded_string, "input len={}", buf_len);
         }
     }
 
-    fn chunked_encode_str(bytes: &[u8], config: Config) -> String {
+    fn chunked_encode_str(bytes: &[u8], config: GeneralPurposeConfig) -> String {
         let mut s = String::new();
-        {
-            let mut sink = StringSink::new(&mut s);
-            let encoder = ChunkedEncoder::new(config);
-            encoder.encode(bytes, &mut sink).unwrap();
-        }
 
-        return s;
-    }
+        let mut sink = StringSink::new(&mut s);
+        let engine = GeneralPurpose::new(&STANDARD, config);
+        let encoder = ChunkedEncoder::new(&engine);
+        encoder.encode(bytes, &mut sink).unwrap();
 
-    fn config_with_pad(pad: bool) -> Config {
-        Config::new(CharacterSet::Standard, pad)
+        s
     }
 
     // An abstraction around sinks so that we can have tests that easily to any sink implementation
     pub trait SinkTestHelper {
-        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String;
+        fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String;
     }
 
     struct StringSinkTestHelper;
 
     impl SinkTestHelper for StringSinkTestHelper {
-        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String {
-            let encoder = ChunkedEncoder::new(config);
+        fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
+            let encoder = ChunkedEncoder::new(engine);
             let mut s = String::new();
-            {
-                let mut sink = StringSink::new(&mut s);
-                encoder.encode(bytes, &mut sink).unwrap();
-            }
+            let mut sink = StringSink::new(&mut s);
+            encoder.encode(bytes, &mut sink).unwrap();
 
             s
         }

+ 172 - 696
zeroidc/vendor/base64/src/decode.rs

@@ -1,32 +1,15 @@
-use crate::{tables, Config, PAD_BYTE};
-
-#[cfg(any(feature = "alloc", feature = "std", test))]
-use crate::STANDARD;
+use crate::engine::{general_purpose::STANDARD, DecodeEstimate, Engine};
 #[cfg(any(feature = "alloc", feature = "std", test))]
 use alloc::vec::Vec;
 use core::fmt;
 #[cfg(any(feature = "std", test))]
 use std::error;
 
-// decode logic operates on chunks of 8 input bytes without padding
-const INPUT_CHUNK_LEN: usize = 8;
-const DECODED_CHUNK_LEN: usize = 6;
-// we read a u64 and write a u64, but a u64 of input only yields 6 bytes of output, so the last
-// 2 bytes of any output u64 should not be counted as written to (but must be available in a
-// slice).
-const DECODED_CHUNK_SUFFIX: usize = 2;
-
-// how many u64's of input to handle at a time
-const CHUNKS_PER_FAST_LOOP_BLOCK: usize = 4;
-const INPUT_BLOCK_LEN: usize = CHUNKS_PER_FAST_LOOP_BLOCK * INPUT_CHUNK_LEN;
-// includes the trailing 2 bytes for the final u64 write
-const DECODED_BLOCK_LEN: usize =
-    CHUNKS_PER_FAST_LOOP_BLOCK * DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX;
-
 /// Errors that can occur while decoding.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum DecodeError {
     /// An invalid byte was found in the input. The offset and offending byte are provided.
+    /// Padding characters (`=`) interspersed in the encoded form will be treated as invalid bytes.
     InvalidByte(usize, u8),
     /// The length of the input is invalid.
     /// A typical cause of this is stray trailing whitespace or other separator bytes.
@@ -36,560 +19,159 @@ pub enum DecodeError {
     InvalidLength,
     /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded.
     /// This is indicative of corrupted or truncated Base64.
-    /// Unlike InvalidByte, which reports symbols that aren't in the alphabet, this error is for
+    /// Unlike `InvalidByte`, which reports symbols that aren't in the alphabet, this error is for
     /// symbols that are in the alphabet but represent nonsensical encodings.
     InvalidLastSymbol(usize, u8),
+    /// The nature of the padding was not as configured: absent or incorrect when it must be
+    /// canonical, or present when it must be absent, etc.
+    InvalidPadding,
 }
 
 impl fmt::Display for DecodeError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match *self {
-            DecodeError::InvalidByte(index, byte) => {
-                write!(f, "Invalid byte {}, offset {}.", byte, index)
-            }
-            DecodeError::InvalidLength => write!(f, "Encoded text cannot have a 6-bit remainder."),
-            DecodeError::InvalidLastSymbol(index, byte) => {
+            Self::InvalidByte(index, byte) => write!(f, "Invalid byte {}, offset {}.", byte, index),
+            Self::InvalidLength => write!(f, "Encoded text cannot have a 6-bit remainder."),
+            Self::InvalidLastSymbol(index, byte) => {
                 write!(f, "Invalid last symbol {}, offset {}.", byte, index)
             }
+            Self::InvalidPadding => write!(f, "Invalid padding"),
         }
     }
 }
 
 #[cfg(any(feature = "std", test))]
 impl error::Error for DecodeError {
-    fn description(&self) -> &str {
-        match *self {
-            DecodeError::InvalidByte(_, _) => "invalid byte",
-            DecodeError::InvalidLength => "invalid length",
-            DecodeError::InvalidLastSymbol(_, _) => "invalid last symbol",
+    fn cause(&self) -> Option<&dyn error::Error> {
+        None
+    }
+}
+
+/// Errors that can occur while decoding into a slice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum DecodeSliceError {
+    /// A [DecodeError] occurred
+    DecodeError(DecodeError),
+    /// The provided slice _may_ be too small.
+    ///
+    /// The check is conservative (assumes the last triplet of output bytes will all be needed).
+    OutputSliceTooSmall,
+}
+
+impl fmt::Display for DecodeSliceError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::DecodeError(e) => write!(f, "DecodeError: {}", e),
+            Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
         }
     }
+}
 
+#[cfg(any(feature = "std", test))]
+impl error::Error for DecodeSliceError {
     fn cause(&self) -> Option<&dyn error::Error> {
-        None
+        match self {
+            DecodeSliceError::DecodeError(e) => Some(e),
+            DecodeSliceError::OutputSliceTooSmall => None,
+        }
     }
 }
 
-///Decode from string reference as octets.
-///Returns a Result containing a Vec<u8>.
-///Convenience `decode_config(input, base64::STANDARD);`.
-///
-///# Example
-///
-///```rust
-///extern crate base64;
+impl From<DecodeError> for DecodeSliceError {
+    fn from(e: DecodeError) -> Self {
+        DecodeSliceError::DecodeError(e)
+    }
+}
+
+/// Decode base64 using the [`STANDARD` engine](STANDARD).
 ///
-///fn main() {
-///    let bytes = base64::decode("aGVsbG8gd29ybGQ=").unwrap();
-///    println!("{:?}", bytes);
-///}
-///```
+/// See [Engine::decode].
+#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
 #[cfg(any(feature = "alloc", feature = "std", test))]
 pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
-    decode_config(input, STANDARD)
+    STANDARD.decode(input)
 }
 
-///Decode from string reference as octets.
-///Returns a Result containing a Vec<u8>.
-///
-///# Example
-///
-///```rust
-///extern crate base64;
+/// Decode from string reference as octets using the specified [Engine].
 ///
-///fn main() {
-///    let bytes = base64::decode_config("aGVsbG8gd29ybGR+Cg==", base64::STANDARD).unwrap();
-///    println!("{:?}", bytes);
-///
-///    let bytes_url = base64::decode_config("aGVsbG8gaW50ZXJuZXR-Cg==", base64::URL_SAFE).unwrap();
-///    println!("{:?}", bytes_url);
-///}
-///```
+/// See [Engine::decode].
+///Returns a `Result` containing a `Vec<u8>`.
+#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub fn decode_config<T: AsRef<[u8]>>(input: T, config: Config) -> Result<Vec<u8>, DecodeError> {
-    let mut buffer = Vec::<u8>::with_capacity(input.as_ref().len() * 4 / 3);
-
-    decode_config_buf(input, config, &mut buffer).map(|_| buffer)
+pub fn decode_engine<E: Engine, T: AsRef<[u8]>>(
+    input: T,
+    engine: &E,
+) -> Result<Vec<u8>, DecodeError> {
+    engine.decode(input)
 }
 
-///Decode from string reference as octets.
-///Writes into the supplied buffer to avoid allocation.
-///Returns a Result containing an empty tuple, aka ().
-///
-///# Example
-///
-///```rust
-///extern crate base64;
-///
-///fn main() {
-///    let mut buffer = Vec::<u8>::new();
-///    base64::decode_config_buf("aGVsbG8gd29ybGR+Cg==", base64::STANDARD, &mut buffer).unwrap();
-///    println!("{:?}", buffer);
+/// Decode from string reference as octets.
 ///
-///    buffer.clear();
-///
-///    base64::decode_config_buf("aGVsbG8gaW50ZXJuZXR-Cg==", base64::URL_SAFE, &mut buffer)
-///        .unwrap();
-///    println!("{:?}", buffer);
-///}
-///```
+/// See [Engine::decode_vec].
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub fn decode_config_buf<T: AsRef<[u8]>>(
+#[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")]
+pub fn decode_engine_vec<E: Engine, T: AsRef<[u8]>>(
     input: T,
-    config: Config,
     buffer: &mut Vec<u8>,
+    engine: &E,
 ) -> Result<(), DecodeError> {
-    let input_bytes = input.as_ref();
-
-    let starting_output_len = buffer.len();
-
-    let num_chunks = num_chunks(input_bytes);
-    let decoded_len_estimate = num_chunks
-        .checked_mul(DECODED_CHUNK_LEN)
-        .and_then(|p| p.checked_add(starting_output_len))
-        .expect("Overflow when calculating output buffer length");
-    buffer.resize(decoded_len_estimate, 0);
-
-    let bytes_written;
-    {
-        let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
-        bytes_written = decode_helper(input_bytes, num_chunks, config, buffer_slice)?;
-    }
-
-    buffer.truncate(starting_output_len + bytes_written);
-
-    Ok(())
+    engine.decode_vec(input, buffer)
 }
 
 /// Decode the input into the provided output slice.
 ///
-/// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
-///
-/// If you don't know ahead of time what the decoded length should be, size your buffer with a
-/// conservative estimate for the decoded length of an input: 3 bytes of output for every 4 bytes of
-/// input, rounded up, or in other words `(input_len + 3) / 4 * 3`.
-///
-/// If the slice is not large enough, this will panic.
-pub fn decode_config_slice<T: AsRef<[u8]>>(
+/// See [Engine::decode_slice].
+#[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")]
+pub fn decode_engine_slice<E: Engine, T: AsRef<[u8]>>(
     input: T,
-    config: Config,
     output: &mut [u8],
-) -> Result<usize, DecodeError> {
-    let input_bytes = input.as_ref();
-
-    decode_helper(input_bytes, num_chunks(input_bytes), config, output)
-}
-
-/// Return the number of input chunks (including a possibly partial final chunk) in the input
-fn num_chunks(input: &[u8]) -> usize {
-    input
-        .len()
-        .checked_add(INPUT_CHUNK_LEN - 1)
-        .expect("Overflow when calculating number of chunks in input")
-        / INPUT_CHUNK_LEN
+    engine: &E,
+) -> Result<usize, DecodeSliceError> {
+    engine.decode_slice(input, output)
 }
 
-/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
-/// Returns the number of bytes written, or an error.
-// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is
-// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment,
-// but this is fragile and the best setting changes with only minor code modifications.
-#[inline]
-fn decode_helper(
-    input: &[u8],
-    num_chunks: usize,
-    config: Config,
-    output: &mut [u8],
-) -> Result<usize, DecodeError> {
-    let char_set = config.char_set;
-    let decode_table = char_set.decode_table();
-
-    let remainder_len = input.len() % INPUT_CHUNK_LEN;
-
-    // Because the fast decode loop writes in groups of 8 bytes (unrolled to
-    // CHUNKS_PER_FAST_LOOP_BLOCK times 8 bytes, where possible) and outputs 8 bytes at a time (of
-    // which only 6 are valid data), we need to be sure that we stop using the fast decode loop
-    // soon enough that there will always be 2 more bytes of valid data written after that loop.
-    let trailing_bytes_to_skip = match remainder_len {
-        // if input is a multiple of the chunk size, ignore the last chunk as it may have padding,
-        // and the fast decode logic cannot handle padding
-        0 => INPUT_CHUNK_LEN,
-        // 1 and 5 trailing bytes are illegal: can't decode 6 bits of input into a byte
-        1 | 5 => {
-            // trailing whitespace is so common that it's worth it to check the last byte to
-            // possibly return a better error message
-            if let Some(b) = input.last() {
-                if *b != PAD_BYTE && decode_table[*b as usize] == tables::INVALID_VALUE {
-                    return Err(DecodeError::InvalidByte(input.len() - 1, *b));
-                }
-            }
-
-            return Err(DecodeError::InvalidLength);
-        }
-        // This will decode to one output byte, which isn't enough to overwrite the 2 extra bytes
-        // written by the fast decode loop. So, we have to ignore both these 2 bytes and the
-        // previous chunk.
-        2 => INPUT_CHUNK_LEN + 2,
-        // If this is 3 unpadded chars, then it would actually decode to 2 bytes. However, if this
-        // is an erroneous 2 chars + 1 pad char that would decode to 1 byte, then it should fail
-        // with an error, not panic from going past the bounds of the output slice, so we let it
-        // use stage 3 + 4.
-        3 => INPUT_CHUNK_LEN + 3,
-        // This can also decode to one output byte because it may be 2 input chars + 2 padding
-        // chars, which would decode to 1 byte.
-        4 => INPUT_CHUNK_LEN + 4,
-        // Everything else is a legal decode len (given that we don't require padding), and will
-        // decode to at least 2 bytes of output.
-        _ => remainder_len,
-    };
-
-    // rounded up to include partial chunks
-    let mut remaining_chunks = num_chunks;
-
-    let mut input_index = 0;
-    let mut output_index = 0;
-
-    {
-        let length_of_fast_decode_chunks = input.len().saturating_sub(trailing_bytes_to_skip);
-
-        // Fast loop, stage 1
-        // manual unroll to CHUNKS_PER_FAST_LOOP_BLOCK of u64s to amortize slice bounds checks
-        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_BLOCK_LEN) {
-            while input_index <= max_start_index {
-                let input_slice = &input[input_index..(input_index + INPUT_BLOCK_LEN)];
-                let output_slice = &mut output[output_index..(output_index + DECODED_BLOCK_LEN)];
-
-                decode_chunk(
-                    &input_slice[0..],
-                    input_index,
-                    decode_table,
-                    &mut output_slice[0..],
-                )?;
-                decode_chunk(
-                    &input_slice[8..],
-                    input_index + 8,
-                    decode_table,
-                    &mut output_slice[6..],
-                )?;
-                decode_chunk(
-                    &input_slice[16..],
-                    input_index + 16,
-                    decode_table,
-                    &mut output_slice[12..],
-                )?;
-                decode_chunk(
-                    &input_slice[24..],
-                    input_index + 24,
-                    decode_table,
-                    &mut output_slice[18..],
-                )?;
-
-                input_index += INPUT_BLOCK_LEN;
-                output_index += DECODED_BLOCK_LEN - DECODED_CHUNK_SUFFIX;
-                remaining_chunks -= CHUNKS_PER_FAST_LOOP_BLOCK;
-            }
-        }
-
-        // Fast loop, stage 2 (aka still pretty fast loop)
-        // 8 bytes at a time for whatever we didn't do in stage 1.
-        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_CHUNK_LEN) {
-            while input_index < max_start_index {
-                decode_chunk(
-                    &input[input_index..(input_index + INPUT_CHUNK_LEN)],
-                    input_index,
-                    decode_table,
-                    &mut output
-                        [output_index..(output_index + DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX)],
-                )?;
-
-                output_index += DECODED_CHUNK_LEN;
-                input_index += INPUT_CHUNK_LEN;
-                remaining_chunks -= 1;
-            }
-        }
-    }
-
-    // Stage 3
-    // If input length was such that a chunk had to be deferred until after the fast loop
-    // because decoding it would have produced 2 trailing bytes that wouldn't then be
-    // overwritten, we decode that chunk here. This way is slower but doesn't write the 2
-    // trailing bytes.
-    // However, we still need to avoid the last chunk (partial or complete) because it could
-    // have padding, so we always do 1 fewer to avoid the last chunk.
-    for _ in 1..remaining_chunks {
-        decode_chunk_precise(
-            &input[input_index..],
-            input_index,
-            decode_table,
-            &mut output[output_index..(output_index + DECODED_CHUNK_LEN)],
-        )?;
-
-        input_index += INPUT_CHUNK_LEN;
-        output_index += DECODED_CHUNK_LEN;
-    }
-
-    // always have one more (possibly partial) block of 8 input
-    debug_assert!(input.len() - input_index > 1 || input.is_empty());
-    debug_assert!(input.len() - input_index <= 8);
-
-    // Stage 4
-    // Finally, decode any leftovers that aren't a complete input block of 8 bytes.
-    // Use a u64 as a stack-resident 8 byte buffer.
-    let mut leftover_bits: u64 = 0;
-    let mut morsels_in_leftover = 0;
-    let mut padding_bytes = 0;
-    let mut first_padding_index: usize = 0;
-    let mut last_symbol = 0_u8;
-    let start_of_leftovers = input_index;
-    for (i, b) in input[start_of_leftovers..].iter().enumerate() {
-        // '=' padding
-        if *b == PAD_BYTE {
-            // There can be bad padding in a few ways:
-            // 1 - Padding with non-padding characters after it
-            // 2 - Padding after zero or one non-padding characters before it
-            //     in the current quad.
-            // 3 - More than two characters of padding. If 3 or 4 padding chars
-            //     are in the same quad, that implies it will be caught by #2.
-            //     If it spreads from one quad to another, it will be caught by
-            //     #2 in the second quad.
-
-            if i % 4 < 2 {
-                // Check for case #2.
-                let bad_padding_index = start_of_leftovers
-                    + if padding_bytes > 0 {
-                        // If we've already seen padding, report the first padding index.
-                        // This is to be consistent with the faster logic above: it will report an
-                        // error on the first padding character (since it doesn't expect to see
-                        // anything but actual encoded data).
-                        first_padding_index
-                    } else {
-                        // haven't seen padding before, just use where we are now
-                        i
-                    };
-                return Err(DecodeError::InvalidByte(bad_padding_index, *b));
-            }
-
-            if padding_bytes == 0 {
-                first_padding_index = i;
-            }
-
-            padding_bytes += 1;
-            continue;
-        }
-
-        // Check for case #1.
-        // To make '=' handling consistent with the main loop, don't allow
-        // non-suffix '=' in trailing chunk either. Report error as first
-        // erroneous padding.
-        if padding_bytes > 0 {
-            return Err(DecodeError::InvalidByte(
-                start_of_leftovers + first_padding_index,
-                PAD_BYTE,
-            ));
-        }
-        last_symbol = *b;
-
-        // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding.
-        // To minimize shifts, pack the leftovers from left to right.
-        let shift = 64 - (morsels_in_leftover + 1) * 6;
-        // tables are all 256 elements, lookup with a u8 index always succeeds
-        let morsel = decode_table[*b as usize];
-        if morsel == tables::INVALID_VALUE {
-            return Err(DecodeError::InvalidByte(start_of_leftovers + i, *b));
-        }
-
-        leftover_bits |= (morsel as u64) << shift;
-        morsels_in_leftover += 1;
-    }
-
-    let leftover_bits_ready_to_append = match morsels_in_leftover {
-        0 => 0,
-        2 => 8,
-        3 => 16,
-        4 => 24,
-        6 => 32,
-        7 => 40,
-        8 => 48,
-        _ => unreachable!(
-            "Impossible: must only have 0 to 8 input bytes in last chunk, with no invalid lengths"
-        ),
-    };
-
-    // if there are bits set outside the bits we care about, last symbol encodes trailing bits that
-    // will not be included in the output
-    let mask = !0 >> leftover_bits_ready_to_append;
-    if !config.decode_allow_trailing_bits && (leftover_bits & mask) != 0 {
-        // last morsel is at `morsels_in_leftover` - 1
-        return Err(DecodeError::InvalidLastSymbol(
-            start_of_leftovers + morsels_in_leftover - 1,
-            last_symbol,
-        ));
-    }
-
-    let mut leftover_bits_appended_to_buf = 0;
-    while leftover_bits_appended_to_buf < leftover_bits_ready_to_append {
-        // `as` simply truncates the higher bits, which is what we want here
-        let selected_bits = (leftover_bits >> (56 - leftover_bits_appended_to_buf)) as u8;
-        output[output_index] = selected_bits;
-        output_index += 1;
-
-        leftover_bits_appended_to_buf += 8;
-    }
-
-    Ok(output_index)
-}
-
-#[inline]
-fn write_u64(output: &mut [u8], value: u64) {
-    output[..8].copy_from_slice(&value.to_be_bytes());
-}
-
-/// Decode 8 bytes of input into 6 bytes of output. 8 bytes of output will be written, but only the
-/// first 6 of those contain meaningful data.
+/// Returns a conservative estimate of the decoded size of `encoded_len` base64 symbols (rounded up
+/// to the next group of 3 decoded bytes).
 ///
-/// `input` is the bytes to decode, of which the first 8 bytes will be processed.
-/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors
-/// accurately)
-/// `decode_table` is the lookup table for the particular base64 alphabet.
-/// `output` will have its first 8 bytes overwritten, of which only the first 6 are valid decoded
-/// data.
-// yes, really inline (worth 30-50% speedup)
-#[inline(always)]
-fn decode_chunk(
-    input: &[u8],
-    index_at_start_of_input: usize,
-    decode_table: &[u8; 256],
-    output: &mut [u8],
-) -> Result<(), DecodeError> {
-    let mut accum: u64;
-
-    let morsel = decode_table[input[0] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
-    }
-    accum = (morsel as u64) << 58;
-
-    let morsel = decode_table[input[1] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 1,
-            input[1],
-        ));
-    }
-    accum |= (morsel as u64) << 52;
-
-    let morsel = decode_table[input[2] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 2,
-            input[2],
-        ));
-    }
-    accum |= (morsel as u64) << 46;
-
-    let morsel = decode_table[input[3] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 3,
-            input[3],
-        ));
-    }
-    accum |= (morsel as u64) << 40;
-
-    let morsel = decode_table[input[4] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 4,
-            input[4],
-        ));
-    }
-    accum |= (morsel as u64) << 34;
-
-    let morsel = decode_table[input[5] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 5,
-            input[5],
-        ));
-    }
-    accum |= (morsel as u64) << 28;
-
-    let morsel = decode_table[input[6] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 6,
-            input[6],
-        ));
-    }
-    accum |= (morsel as u64) << 22;
-
-    let morsel = decode_table[input[7] as usize];
-    if morsel == tables::INVALID_VALUE {
-        return Err(DecodeError::InvalidByte(
-            index_at_start_of_input + 7,
-            input[7],
-        ));
-    }
-    accum |= (morsel as u64) << 16;
-
-    write_u64(output, accum);
-
-    Ok(())
-}
-
-/// Decode an 8-byte chunk, but only write the 6 bytes actually decoded instead of including 2
-/// trailing garbage bytes.
-#[inline]
-fn decode_chunk_precise(
-    input: &[u8],
-    index_at_start_of_input: usize,
-    decode_table: &[u8; 256],
-    output: &mut [u8],
-) -> Result<(), DecodeError> {
-    let mut tmp_buf = [0_u8; 8];
-
-    decode_chunk(
-        input,
-        index_at_start_of_input,
-        decode_table,
-        &mut tmp_buf[..],
-    )?;
-
-    output[0..6].copy_from_slice(&tmp_buf[0..6]);
-
-    Ok(())
+/// The resulting length will be a safe choice for the size of a decode buffer, but may have up to
+/// 2 trailing bytes that won't end up being needed.
+///
+/// # Examples
+///
+/// ```
+/// use base64::decoded_len_estimate;
+///
+/// assert_eq!(3, decoded_len_estimate(1));
+/// assert_eq!(3, decoded_len_estimate(2));
+/// assert_eq!(3, decoded_len_estimate(3));
+/// assert_eq!(3, decoded_len_estimate(4));
+/// // start of the next quad of encoded symbols
+/// assert_eq!(6, decoded_len_estimate(5));
+/// ```
+///
+/// # Panics
+///
+/// Panics if decoded length estimation overflows.
+/// This would happen for sizes within a few bytes of the maximum value of `usize`.
+pub fn decoded_len_estimate(encoded_len: usize) -> usize {
+    STANDARD
+        .internal_decoded_len_estimate(encoded_len)
+        .decoded_len_estimate()
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::{
-        encode::encode_config_buf,
-        encode::encode_config_slice,
-        tests::{assert_encode_sanity, random_config},
+        alphabet,
+        engine::{general_purpose, Config, GeneralPurpose},
+        tests::{assert_encode_sanity, random_engine},
     };
-
     use rand::{
         distributions::{Distribution, Uniform},
-        FromEntropy, Rng,
+        Rng, SeedableRng,
     };
 
-    #[test]
-    fn decode_chunk_precise_writes_only_6_bytes() {
-        let input = b"Zm9vYmFy"; // "foobar"
-        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
-        decode_chunk_precise(&input[..], 0, tables::STANDARD_DECODE, &mut output).unwrap();
-        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output);
-    }
-
-    #[test]
-    fn decode_chunk_writes_8_bytes() {
-        let input = b"Zm9vYmFy"; // "foobar"
-        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
-        decode_chunk(&input[..], 0, tables::STANDARD_DECODE, &mut output).unwrap();
-        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 0, 0], &output);
-    }
-
     #[test]
     fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() {
         let mut orig_data = Vec::new();
@@ -616,9 +198,9 @@ mod tests {
                 orig_data.push(rng.gen());
             }
 
-            let config = random_config(&mut rng);
-            encode_config_buf(&orig_data, config, &mut encoded_data);
-            assert_encode_sanity(&encoded_data, config, input_len);
+            let engine = random_engine(&mut rng);
+            engine.encode_string(&orig_data, &mut encoded_data);
+            assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
 
             let prefix_len = prefix_len_range.sample(&mut rng);
 
@@ -631,9 +213,13 @@ mod tests {
             decoded_with_prefix.copy_from_slice(&prefix);
 
             // decode into the non-empty buf
-            decode_config_buf(&encoded_data, config, &mut decoded_with_prefix).unwrap();
+            engine
+                .decode_vec(&encoded_data, &mut decoded_with_prefix)
+                .unwrap();
             // also decode into the empty buf
-            decode_config_buf(&encoded_data, config, &mut decoded_without_prefix).unwrap();
+            engine
+                .decode_vec(&encoded_data, &mut decoded_without_prefix)
+                .unwrap();
 
             assert_eq!(
                 prefix_len + decoded_without_prefix.len(),
@@ -649,7 +235,66 @@ mod tests {
     }
 
     #[test]
-    fn decode_into_slice_doesnt_clobber_existing_prefix_or_suffix() {
+    fn decode_slice_doesnt_clobber_existing_prefix_or_suffix() {
+        do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
+            e.decode_slice(input, output).unwrap()
+        })
+    }
+
+    #[test]
+    fn decode_slice_unchecked_doesnt_clobber_existing_prefix_or_suffix() {
+        do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
+            e.decode_slice_unchecked(input, output).unwrap()
+        })
+    }
+
+    #[test]
+    fn decode_engine_estimation_works_for_various_lengths() {
+        let engine = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD);
+        for num_prefix_quads in 0..100 {
+            for suffix in &["AA", "AAA", "AAAA"] {
+                let mut prefix = "AAAA".repeat(num_prefix_quads);
+                prefix.push_str(suffix);
+                // make sure no overflow (and thus a panic) occurs
+                let res = engine.decode(prefix);
+                assert!(res.is_ok());
+            }
+        }
+    }
+
+    #[test]
+    fn decode_slice_output_length_errors() {
+        for num_quads in 1..100 {
+            let input = "AAAA".repeat(num_quads);
+            let mut vec = vec![0; (num_quads - 1) * 3];
+            assert_eq!(
+                DecodeSliceError::OutputSliceTooSmall,
+                STANDARD.decode_slice(&input, &mut vec).unwrap_err()
+            );
+            vec.push(0);
+            assert_eq!(
+                DecodeSliceError::OutputSliceTooSmall,
+                STANDARD.decode_slice(&input, &mut vec).unwrap_err()
+            );
+            vec.push(0);
+            assert_eq!(
+                DecodeSliceError::OutputSliceTooSmall,
+                STANDARD.decode_slice(&input, &mut vec).unwrap_err()
+            );
+            vec.push(0);
+            // now it works
+            assert_eq!(
+                num_quads * 3,
+                STANDARD.decode_slice(&input, &mut vec).unwrap()
+            );
+        }
+    }
+
+    fn do_decode_slice_doesnt_clobber_existing_prefix_or_suffix<
+        F: Fn(&GeneralPurpose, &[u8], &mut [u8]) -> usize,
+    >(
+        call_decode: F,
+    ) {
         let mut orig_data = Vec::new();
         let mut encoded_data = String::new();
         let mut decode_buf = Vec::new();
@@ -671,9 +316,9 @@ mod tests {
                 orig_data.push(rng.gen());
             }
 
-            let config = random_config(&mut rng);
-            encode_config_buf(&orig_data, config, &mut encoded_data);
-            assert_encode_sanity(&encoded_data, config, input_len);
+            let engine = random_engine(&mut rng);
+            engine.encode_string(&orig_data, &mut encoded_data);
+            assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
 
             // fill the buffer with random garbage, long enough to have some room before and after
             for _ in 0..5000 {
@@ -687,7 +332,7 @@ mod tests {
 
             // decode into the non-empty buf
             let decode_bytes_written =
-                decode_config_slice(&encoded_data, config, &mut decode_buf[offset..]).unwrap();
+                call_decode(&engine, encoded_data.as_bytes(), &mut decode_buf[offset..]);
 
             assert_eq!(orig_data.len(), decode_bytes_written);
             assert_eq!(
@@ -701,173 +346,4 @@ mod tests {
             );
         }
     }
-
-    #[test]
-    fn decode_into_slice_fits_in_precisely_sized_slice() {
-        let mut orig_data = Vec::new();
-        let mut encoded_data = String::new();
-        let mut decode_buf = Vec::new();
-
-        let input_len_range = Uniform::new(0, 1000);
-
-        let mut rng = rand::rngs::SmallRng::from_entropy();
-
-        for _ in 0..10_000 {
-            orig_data.clear();
-            encoded_data.clear();
-            decode_buf.clear();
-
-            let input_len = input_len_range.sample(&mut rng);
-
-            for _ in 0..input_len {
-                orig_data.push(rng.gen());
-            }
-
-            let config = random_config(&mut rng);
-            encode_config_buf(&orig_data, config, &mut encoded_data);
-            assert_encode_sanity(&encoded_data, config, input_len);
-
-            decode_buf.resize(input_len, 0);
-
-            // decode into the non-empty buf
-            let decode_bytes_written =
-                decode_config_slice(&encoded_data, config, &mut decode_buf[..]).unwrap();
-
-            assert_eq!(orig_data.len(), decode_bytes_written);
-            assert_eq!(orig_data, decode_buf);
-        }
-    }
-
-    #[test]
-    fn detect_invalid_last_symbol_two_bytes() {
-        let decode =
-            |input, forgiving| decode_config(input, STANDARD.decode_allow_trailing_bits(forgiving));
-
-        // example from https://github.com/marshallpierce/rust-base64/issues/75
-        assert!(decode("iYU=", false).is_ok());
-        // trailing 01
-        assert_eq!(
-            Err(DecodeError::InvalidLastSymbol(2, b'V')),
-            decode("iYV=", false)
-        );
-        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
-        // trailing 10
-        assert_eq!(
-            Err(DecodeError::InvalidLastSymbol(2, b'W')),
-            decode("iYW=", false)
-        );
-        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
-        // trailing 11
-        assert_eq!(
-            Err(DecodeError::InvalidLastSymbol(2, b'X')),
-            decode("iYX=", false)
-        );
-        assert_eq!(Ok(vec![137, 133]), decode("iYV=", true));
-
-        // also works when there are 2 quads in the last block
-        assert_eq!(
-            Err(DecodeError::InvalidLastSymbol(6, b'X')),
-            decode("AAAAiYX=", false)
-        );
-        assert_eq!(Ok(vec![0, 0, 0, 137, 133]), decode("AAAAiYX=", true));
-    }
-
-    #[test]
-    fn detect_invalid_last_symbol_one_byte() {
-        // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol
-
-        assert!(decode("/w==").is_ok());
-        // trailing 01
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'x')), decode("/x=="));
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'z')), decode("/z=="));
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'0')), decode("/0=="));
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'9')), decode("/9=="));
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'+')), decode("/+=="));
-        assert_eq!(Err(DecodeError::InvalidLastSymbol(1, b'/')), decode("//=="));
-
-        // also works when there are 2 quads in the last block
-        assert_eq!(
-            Err(DecodeError::InvalidLastSymbol(5, b'x')),
-            decode("AAAA/x==")
-        );
-    }
-
-    #[test]
-    fn detect_invalid_last_symbol_every_possible_three_symbols() {
-        let mut base64_to_bytes = ::std::collections::HashMap::new();
-
-        let mut bytes = [0_u8; 2];
-        for b1 in 0_u16..256 {
-            bytes[0] = b1 as u8;
-            for b2 in 0_u16..256 {
-                bytes[1] = b2 as u8;
-                let mut b64 = vec![0_u8; 4];
-                assert_eq!(4, encode_config_slice(&bytes, STANDARD, &mut b64[..]));
-                let mut v = ::std::vec::Vec::with_capacity(2);
-                v.extend_from_slice(&bytes[..]);
-
-                assert!(base64_to_bytes.insert(b64, v).is_none());
-            }
-        }
-
-        // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol
-
-        let mut symbols = [0_u8; 4];
-        for &s1 in STANDARD.char_set.encode_table().iter() {
-            symbols[0] = s1;
-            for &s2 in STANDARD.char_set.encode_table().iter() {
-                symbols[1] = s2;
-                for &s3 in STANDARD.char_set.encode_table().iter() {
-                    symbols[2] = s3;
-                    symbols[3] = PAD_BYTE;
-
-                    match base64_to_bytes.get(&symbols[..]) {
-                        Some(bytes) => {
-                            assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD))
-                        }
-                        None => assert_eq!(
-                            Err(DecodeError::InvalidLastSymbol(2, s3)),
-                            decode_config(&symbols[..], STANDARD)
-                        ),
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn detect_invalid_last_symbol_every_possible_two_symbols() {
-        let mut base64_to_bytes = ::std::collections::HashMap::new();
-
-        for b in 0_u16..256 {
-            let mut b64 = vec![0_u8; 4];
-            assert_eq!(4, encode_config_slice(&[b as u8], STANDARD, &mut b64[..]));
-            let mut v = ::std::vec::Vec::with_capacity(1);
-            v.push(b as u8);
-
-            assert!(base64_to_bytes.insert(b64, v).is_none());
-        }
-
-        // every possible combination of symbols must either decode to 1 byte or get InvalidLastSymbol
-
-        let mut symbols = [0_u8; 4];
-        for &s1 in STANDARD.char_set.encode_table().iter() {
-            symbols[0] = s1;
-            for &s2 in STANDARD.char_set.encode_table().iter() {
-                symbols[1] = s2;
-                symbols[2] = PAD_BYTE;
-                symbols[3] = PAD_BYTE;
-
-                match base64_to_bytes.get(&symbols[..]) {
-                    Some(bytes) => {
-                        assert_eq!(Ok(bytes.to_vec()), decode_config(&symbols, STANDARD))
-                    }
-                    None => assert_eq!(
-                        Err(DecodeError::InvalidLastSymbol(1, s2)),
-                        decode_config(&symbols[..], STANDARD)
-                    ),
-                }
-            }
-        }
-    }
 }

+ 15 - 15
zeroidc/vendor/base64/src/display.rs

@@ -1,36 +1,36 @@
 //! Enables base64'd output anywhere you might use a `Display` implementation, like a format string.
 //!
 //! ```
-//! use base64::display::Base64Display;
+//! use base64::{display::Base64Display, engine::general_purpose::STANDARD};
 //!
 //! let data = vec![0x0, 0x1, 0x2, 0x3];
-//! let wrapper = Base64Display::with_config(&data, base64::STANDARD);
+//! let wrapper = Base64Display::new(&data, &STANDARD);
 //!
 //! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper));
 //! ```
 
 use super::chunked_encoder::ChunkedEncoder;
-use super::Config;
+use crate::engine::Engine;
 use core::fmt::{Display, Formatter};
 use core::{fmt, str};
 
 /// A convenience wrapper for base64'ing bytes into a format string without heap allocation.
-pub struct Base64Display<'a> {
+pub struct Base64Display<'a, 'e, E: Engine> {
     bytes: &'a [u8],
-    chunked_encoder: ChunkedEncoder,
+    chunked_encoder: ChunkedEncoder<'e, E>,
 }
 
-impl<'a> Base64Display<'a> {
-    /// Create a `Base64Display` with the provided config.
-    pub fn with_config(bytes: &[u8], config: Config) -> Base64Display {
+impl<'a, 'e, E: Engine> Base64Display<'a, 'e, E> {
+    /// Create a `Base64Display` with the provided engine.
+    pub fn new(bytes: &'a [u8], engine: &'e E) -> Base64Display<'a, 'e, E> {
         Base64Display {
             bytes,
-            chunked_encoder: ChunkedEncoder::new(config),
+            chunked_encoder: ChunkedEncoder::new(engine),
         }
     }
 }
 
-impl<'a> Display for Base64Display<'a> {
+impl<'a, 'e, E: Engine> Display for Base64Display<'a, 'e, E> {
     fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
         let mut sink = FormatterSink { f: formatter };
         self.chunked_encoder.encode(self.bytes, &mut sink)
@@ -57,18 +57,18 @@ mod tests {
     use super::super::chunked_encoder::tests::{
         chunked_encode_matches_normal_encode_random, SinkTestHelper,
     };
-    use super::super::*;
     use super::*;
+    use crate::engine::general_purpose::STANDARD;
 
     #[test]
     fn basic_display() {
         assert_eq!(
             "~$Zm9vYmFy#*",
-            format!("~${}#*", Base64Display::with_config(b"foobar", STANDARD))
+            format!("~${}#*", Base64Display::new(b"foobar", &STANDARD))
         );
         assert_eq!(
             "~$Zm9vYmFyZg==#*",
-            format!("~${}#*", Base64Display::with_config(b"foobarf", STANDARD))
+            format!("~${}#*", Base64Display::new(b"foobarf", &STANDARD))
         );
     }
 
@@ -81,8 +81,8 @@ mod tests {
     struct DisplaySinkTestHelper;
 
     impl SinkTestHelper for DisplaySinkTestHelper {
-        fn encode_to_string(&self, config: Config, bytes: &[u8]) -> String {
-            format!("{}", Base64Display::with_config(bytes, config))
+        fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
+            format!("{}", Base64Display::new(bytes, engine))
         }
     }
 }

+ 169 - 356
zeroidc/vendor/base64/src/encode.rs

@@ -1,130 +1,59 @@
-use crate::{Config, PAD_BYTE};
 #[cfg(any(feature = "alloc", feature = "std", test))]
-use crate::{chunked_encoder, STANDARD};
+use alloc::string::String;
+use core::fmt;
+#[cfg(any(feature = "std", test))]
+use std::error;
+
 #[cfg(any(feature = "alloc", feature = "std", test))]
-use alloc::{string::String, vec};
-use core::convert::TryInto;
+use crate::engine::general_purpose::STANDARD;
+use crate::engine::{Config, Engine};
+use crate::PAD_BYTE;
 
-///Encode arbitrary octets as base64.
-///Returns a String.
-///Convenience for `encode_config(input, base64::STANDARD);`.
-///
-///# Example
+/// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD).
 ///
-///```rust
-///extern crate base64;
-///
-///fn main() {
-///    let b64 = base64::encode(b"hello world");
-///    println!("{}", b64);
-///}
-///```
+/// See [Engine::encode].
+#[allow(unused)]
+#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
 #[cfg(any(feature = "alloc", feature = "std", test))]
 pub fn encode<T: AsRef<[u8]>>(input: T) -> String {
-    encode_config(input, STANDARD)
+    STANDARD.encode(input)
 }
 
-///Encode arbitrary octets as base64.
-///Returns a String.
-///
-///# Example
+///Encode arbitrary octets as base64 using the provided `Engine` into a new `String`.
 ///
-///```rust
-///extern crate base64;
-///
-///fn main() {
-///    let b64 = base64::encode_config(b"hello world~", base64::STANDARD);
-///    println!("{}", b64);
-///
-///    let b64_url = base64::encode_config(b"hello internet~", base64::URL_SAFE);
-///    println!("{}", b64_url);
-///}
-///```
+/// See [Engine::encode].
+#[allow(unused)]
+#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub fn encode_config<T: AsRef<[u8]>>(input: T, config: Config) -> String {
-    let mut buf = match encoded_size(input.as_ref().len(), config) {
-        Some(n) => vec![0; n],
-        None => panic!("integer overflow when calculating buffer size"),
-    };
-
-    encode_with_padding(input.as_ref(), config, buf.len(), &mut buf[..]);
-
-    String::from_utf8(buf).expect("Invalid UTF8")
+pub fn encode_engine<E: Engine, T: AsRef<[u8]>>(input: T, engine: &E) -> String {
+    engine.encode(input)
 }
 
-///Encode arbitrary octets as base64.
-///Writes into the supplied output buffer, which will grow the buffer if needed.
-///
-///# Example
+///Encode arbitrary octets as base64 into a supplied `String`.
 ///
-///```rust
-///extern crate base64;
-///
-///fn main() {
-///    let mut buf = String::new();
-///    base64::encode_config_buf(b"hello world~", base64::STANDARD, &mut buf);
-///    println!("{}", buf);
-///
-///    buf.clear();
-///    base64::encode_config_buf(b"hello internet~", base64::URL_SAFE, &mut buf);
-///    println!("{}", buf);
-///}
-///```
+/// See [Engine::encode_string].
+#[allow(unused)]
+#[deprecated(since = "0.21.0", note = "Use Engine::encode_string")]
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub fn encode_config_buf<T: AsRef<[u8]>>(input: T, config: Config, buf: &mut String) {
-    let input_bytes = input.as_ref();
-
-    {
-        let mut sink = chunked_encoder::StringSink::new(buf);
-        let encoder = chunked_encoder::ChunkedEncoder::new(config);
-
-        encoder
-            .encode(input_bytes, &mut sink)
-            .expect("Writing to a String shouldn't fail")
-    }
+pub fn encode_engine_string<E: Engine, T: AsRef<[u8]>>(
+    input: T,
+    output_buf: &mut String,
+    engine: &E,
+) {
+    engine.encode_string(input, output_buf)
 }
 
-/// Encode arbitrary octets as base64.
-/// Writes into the supplied output buffer.
-///
-/// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
-/// or statically-allocated buffer).
-///
-/// # Panics
-///
-/// If `output` is too small to hold the encoded version of `input`, a panic will result.
-///
-/// # Example
-///
-/// ```rust
-/// extern crate base64;
+/// Encode arbitrary octets as base64 into a supplied slice.
 ///
-/// fn main() {
-///     let s = b"hello internet!";
-///     let mut buf = Vec::new();
-///     // make sure we'll have a slice big enough for base64 + padding
-///     buf.resize(s.len() * 4 / 3 + 4, 0);
-///
-///     let bytes_written = base64::encode_config_slice(s,
-///                             base64::STANDARD, &mut buf);
-///
-///     // shorten our vec down to just what was written
-///     buf.resize(bytes_written, 0);
-///
-///     assert_eq!(s, base64::decode(&buf).unwrap().as_slice());
-/// }
-/// ```
-pub fn encode_config_slice<T: AsRef<[u8]>>(input: T, config: Config, output: &mut [u8]) -> usize {
-    let input_bytes = input.as_ref();
-
-    let encoded_size = encoded_size(input_bytes.len(), config)
-        .expect("usize overflow when calculating buffer size");
-
-    let mut b64_output = &mut output[0..encoded_size];
-
-    encode_with_padding(&input_bytes, config, encoded_size, &mut b64_output);
-
-    encoded_size
+/// See [Engine::encode_slice].
+#[allow(unused)]
+#[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")]
+pub fn encode_engine_slice<E: Engine, T: AsRef<[u8]>>(
+    input: T,
+    output_buf: &mut [u8],
+    engine: &E,
+) -> Result<usize, EncodeSliceError> {
+    engine.encode_slice(input, output_buf)
 }
 
 /// B64-encode and pad (if configured).
@@ -137,12 +66,17 @@ pub fn encode_config_slice<T: AsRef<[u8]>>(input: T, config: Config, output: &mu
 /// `output` must be of size `encoded_size`.
 ///
 /// All bytes in `output` will be written to since it is exactly the size of the output.
-fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output: &mut [u8]) {
-    debug_assert_eq!(encoded_size, output.len());
+pub(crate) fn encode_with_padding<E: Engine + ?Sized>(
+    input: &[u8],
+    output: &mut [u8],
+    engine: &E,
+    expected_encoded_size: usize,
+) {
+    debug_assert_eq!(expected_encoded_size, output.len());
 
-    let b64_bytes_written = encode_to_slice(input, output, config.char_set.encode_table());
+    let b64_bytes_written = engine.internal_encode(input, output);
 
-    let padding_bytes = if config.pad {
+    let padding_bytes = if engine.config().encode_padding() {
         add_padding(input.len(), &mut output[b64_bytes_written..])
     } else {
         0
@@ -152,144 +86,22 @@ fn encode_with_padding(input: &[u8], config: Config, encoded_size: usize, output
         .checked_add(padding_bytes)
         .expect("usize overflow when calculating b64 length");
 
-    debug_assert_eq!(encoded_size, encoded_bytes);
-}
-
-#[inline]
-fn read_u64(s: &[u8]) -> u64 {
-    u64::from_be_bytes(s[..8].try_into().unwrap())
+    debug_assert_eq!(expected_encoded_size, encoded_bytes);
 }
 
-/// Encode input bytes to utf8 base64 bytes. Does not pad.
-/// `output` must be long enough to hold the encoded `input` without padding.
-/// Returns the number of bytes written.
-#[inline]
-pub fn encode_to_slice(input: &[u8], output: &mut [u8], encode_table: &[u8; 64]) -> usize {
-    let mut input_index: usize = 0;
-
-    const BLOCKS_PER_FAST_LOOP: usize = 4;
-    const LOW_SIX_BITS: u64 = 0x3F;
-
-    // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
-    // 2 trailing bytes to be available to read..
-    let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
-    let mut output_index = 0;
-
-    if last_fast_index > 0 {
-        while input_index <= last_fast_index {
-            // Major performance wins from letting the optimizer do the bounds check once, mostly
-            // on the output side
-            let input_chunk = &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
-            let output_chunk = &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];
-
-            // Hand-unrolling for 32 vs 16 or 8 bytes produces yields performance about equivalent
-            // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
-            // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
-            // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
-            // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
-            // Plus, single-digit percentage performance differences might well be quite different
-            // on different hardware.
-
-            let input_u64 = read_u64(&input_chunk[0..]);
-
-            output_chunk[0] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
-            output_chunk[1] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
-            output_chunk[2] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
-            output_chunk[3] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
-            output_chunk[4] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
-            output_chunk[5] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
-            output_chunk[6] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
-            output_chunk[7] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
-            let input_u64 = read_u64(&input_chunk[6..]);
-
-            output_chunk[8] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
-            output_chunk[9] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
-            output_chunk[10] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
-            output_chunk[11] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
-            output_chunk[12] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
-            output_chunk[13] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
-            output_chunk[14] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
-            output_chunk[15] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
-            let input_u64 = read_u64(&input_chunk[12..]);
-
-            output_chunk[16] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
-            output_chunk[17] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
-            output_chunk[18] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
-            output_chunk[19] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
-            output_chunk[20] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
-            output_chunk[21] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
-            output_chunk[22] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
-            output_chunk[23] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
-            let input_u64 = read_u64(&input_chunk[18..]);
-
-            output_chunk[24] = encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
-            output_chunk[25] = encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
-            output_chunk[26] = encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
-            output_chunk[27] = encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
-            output_chunk[28] = encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
-            output_chunk[29] = encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
-            output_chunk[30] = encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
-            output_chunk[31] = encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
-            output_index += BLOCKS_PER_FAST_LOOP * 8;
-            input_index += BLOCKS_PER_FAST_LOOP * 6;
-        }
-    }
-
-    // Encode what's left after the fast loop.
-
-    const LOW_SIX_BITS_U8: u8 = 0x3F;
-
-    let rem = input.len() % 3;
-    let start_of_rem = input.len() - rem;
-
-    // start at the first index not handled by fast loop, which may be 0.
-
-    while input_index < start_of_rem {
-        let input_chunk = &input[input_index..(input_index + 3)];
-        let output_chunk = &mut output[output_index..(output_index + 4)];
-
-        output_chunk[0] = encode_table[(input_chunk[0] >> 2) as usize];
-        output_chunk[1] =
-            encode_table[((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
-        output_chunk[2] =
-            encode_table[((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
-        output_chunk[3] = encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];
-
-        input_index += 3;
-        output_index += 4;
-    }
-
-    if rem == 2 {
-        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
-        output[output_index + 1] = encode_table[((input[start_of_rem] << 4
-            | input[start_of_rem + 1] >> 4)
-            & LOW_SIX_BITS_U8) as usize];
-        output[output_index + 2] =
-            encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
-        output_index += 3;
-    } else if rem == 1 {
-        output[output_index] = encode_table[(input[start_of_rem] >> 2) as usize];
-        output[output_index + 1] =
-            encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
-        output_index += 2;
-    }
-
-    output_index
-}
-
-/// calculate the base64 encoded string size, including padding if appropriate
-pub fn encoded_size(bytes_len: usize, config: Config) -> Option<usize> {
+/// Calculate the base64 encoded length for a given input length, optionally including any
+/// appropriate padding bytes.
+///
+/// Returns `None` if the encoded length can't be represented in `usize`. This will happen for
+/// input lengths in approximately the top quarter of the range of `usize`.
+pub fn encoded_len(bytes_len: usize, padding: bool) -> Option<usize> {
     let rem = bytes_len % 3;
 
     let complete_input_chunks = bytes_len / 3;
     let complete_chunk_output = complete_input_chunks.checked_mul(4);
 
     if rem > 0 {
-        if config.pad {
+        if padding {
             complete_chunk_output.and_then(|c| c.checked_add(4))
         } else {
             let encoded_rem = match rem {
@@ -305,10 +117,12 @@ pub fn encoded_size(bytes_len: usize, config: Config) -> Option<usize> {
 }
 
 /// Write padding characters.
+/// `input_len` is the size of the original, not encoded, input.
 /// `output` is the slice where padding should be written, of length at least 2.
 ///
 /// Returns the number of padding bytes written.
-pub fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
+pub(crate) fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
+    // TODO base on encoded len to use cheaper mod by 4 (aka & 7)
     let rem = input_len % 3;
     let mut bytes_written = 0;
     for _ in 0..((3 - rem) % 3) {
@@ -319,79 +133,102 @@ pub fn add_padding(input_len: usize, output: &mut [u8]) -> usize {
     bytes_written
 }
 
+/// Errors that can occur while encoding into a slice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum EncodeSliceError {
+    /// The provided slice is too small.
+    OutputSliceTooSmall,
+}
+
+impl fmt::Display for EncodeSliceError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
+        }
+    }
+}
+
+#[cfg(any(feature = "std", test))]
+impl error::Error for EncodeSliceError {
+    fn cause(&self) -> Option<&dyn error::Error> {
+        None
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
+
     use crate::{
-        decode::decode_config_buf,
-        tests::{assert_encode_sanity, random_config},
-        Config, STANDARD, URL_SAFE_NO_PAD,
+        alphabet,
+        engine::general_purpose::{GeneralPurpose, NO_PAD, STANDARD},
+        tests::{assert_encode_sanity, random_config, random_engine},
     };
-
     use rand::{
         distributions::{Distribution, Uniform},
-        FromEntropy, Rng,
+        Rng, SeedableRng,
     };
-    use std;
     use std::str;
 
+    const URL_SAFE_NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
+
     #[test]
     fn encoded_size_correct_standard() {
-        assert_encoded_length(0, 0, STANDARD);
+        assert_encoded_length(0, 0, &STANDARD, true);
 
-        assert_encoded_length(1, 4, STANDARD);
-        assert_encoded_length(2, 4, STANDARD);
-        assert_encoded_length(3, 4, STANDARD);
+        assert_encoded_length(1, 4, &STANDARD, true);
+        assert_encoded_length(2, 4, &STANDARD, true);
+        assert_encoded_length(3, 4, &STANDARD, true);
 
-        assert_encoded_length(4, 8, STANDARD);
-        assert_encoded_length(5, 8, STANDARD);
-        assert_encoded_length(6, 8, STANDARD);
+        assert_encoded_length(4, 8, &STANDARD, true);
+        assert_encoded_length(5, 8, &STANDARD, true);
+        assert_encoded_length(6, 8, &STANDARD, true);
 
-        assert_encoded_length(7, 12, STANDARD);
-        assert_encoded_length(8, 12, STANDARD);
-        assert_encoded_length(9, 12, STANDARD);
+        assert_encoded_length(7, 12, &STANDARD, true);
+        assert_encoded_length(8, 12, &STANDARD, true);
+        assert_encoded_length(9, 12, &STANDARD, true);
 
-        assert_encoded_length(54, 72, STANDARD);
+        assert_encoded_length(54, 72, &STANDARD, true);
 
-        assert_encoded_length(55, 76, STANDARD);
-        assert_encoded_length(56, 76, STANDARD);
-        assert_encoded_length(57, 76, STANDARD);
+        assert_encoded_length(55, 76, &STANDARD, true);
+        assert_encoded_length(56, 76, &STANDARD, true);
+        assert_encoded_length(57, 76, &STANDARD, true);
 
-        assert_encoded_length(58, 80, STANDARD);
+        assert_encoded_length(58, 80, &STANDARD, true);
     }
 
     #[test]
     fn encoded_size_correct_no_pad() {
-        assert_encoded_length(0, 0, URL_SAFE_NO_PAD);
+        assert_encoded_length(0, 0, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(1, 2, URL_SAFE_NO_PAD);
-        assert_encoded_length(2, 3, URL_SAFE_NO_PAD);
-        assert_encoded_length(3, 4, URL_SAFE_NO_PAD);
+        assert_encoded_length(1, 2, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(2, 3, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(3, 4, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(4, 6, URL_SAFE_NO_PAD);
-        assert_encoded_length(5, 7, URL_SAFE_NO_PAD);
-        assert_encoded_length(6, 8, URL_SAFE_NO_PAD);
+        assert_encoded_length(4, 6, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(5, 7, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(6, 8, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(7, 10, URL_SAFE_NO_PAD);
-        assert_encoded_length(8, 11, URL_SAFE_NO_PAD);
-        assert_encoded_length(9, 12, URL_SAFE_NO_PAD);
+        assert_encoded_length(7, 10, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(8, 11, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(9, 12, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(54, 72, URL_SAFE_NO_PAD);
+        assert_encoded_length(54, 72, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(55, 74, URL_SAFE_NO_PAD);
-        assert_encoded_length(56, 75, URL_SAFE_NO_PAD);
-        assert_encoded_length(57, 76, URL_SAFE_NO_PAD);
+        assert_encoded_length(55, 74, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(56, 75, &URL_SAFE_NO_PAD_ENGINE, false);
+        assert_encoded_length(57, 76, &URL_SAFE_NO_PAD_ENGINE, false);
 
-        assert_encoded_length(58, 78, URL_SAFE_NO_PAD);
+        assert_encoded_length(58, 78, &URL_SAFE_NO_PAD_ENGINE, false);
     }
 
     #[test]
     fn encoded_size_overflow() {
-        assert_eq!(None, encoded_size(std::usize::MAX, STANDARD));
+        assert_eq!(None, encoded_len(usize::MAX, true));
     }
 
     #[test]
-    fn encode_config_buf_into_nonempty_buffer_doesnt_clobber_prefix() {
+    fn encode_engine_string_into_nonempty_buffer_doesnt_clobber_prefix() {
         let mut orig_data = Vec::new();
         let mut prefix = String::new();
         let mut encoded_data_no_prefix = String::new();
@@ -424,29 +261,39 @@ mod tests {
             }
             encoded_data_with_prefix.push_str(&prefix);
 
-            let config = random_config(&mut rng);
-            encode_config_buf(&orig_data, config, &mut encoded_data_no_prefix);
-            encode_config_buf(&orig_data, config, &mut encoded_data_with_prefix);
+            let engine = random_engine(&mut rng);
+            engine.encode_string(&orig_data, &mut encoded_data_no_prefix);
+            engine.encode_string(&orig_data, &mut encoded_data_with_prefix);
 
             assert_eq!(
                 encoded_data_no_prefix.len() + prefix_len,
                 encoded_data_with_prefix.len()
             );
-            assert_encode_sanity(&encoded_data_no_prefix, config, input_len);
-            assert_encode_sanity(&encoded_data_with_prefix[prefix_len..], config, input_len);
+            assert_encode_sanity(
+                &encoded_data_no_prefix,
+                engine.config().encode_padding(),
+                input_len,
+            );
+            assert_encode_sanity(
+                &encoded_data_with_prefix[prefix_len..],
+                engine.config().encode_padding(),
+                input_len,
+            );
 
             // append plain encode onto prefix
-            prefix.push_str(&mut encoded_data_no_prefix);
+            prefix.push_str(&encoded_data_no_prefix);
 
             assert_eq!(prefix, encoded_data_with_prefix);
 
-            decode_config_buf(&encoded_data_no_prefix, config, &mut decoded).unwrap();
+            engine
+                .decode_vec(&encoded_data_no_prefix, &mut decoded)
+                .unwrap();
             assert_eq!(orig_data, decoded);
         }
     }
 
     #[test]
-    fn encode_config_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
+    fn encode_engine_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
         let mut orig_data = Vec::new();
         let mut encoded_data = Vec::new();
         let mut encoded_data_original_state = Vec::new();
@@ -475,18 +322,18 @@ mod tests {
 
             encoded_data_original_state.extend_from_slice(&encoded_data);
 
-            let config = random_config(&mut rng);
+            let engine = random_engine(&mut rng);
 
-            let encoded_size = encoded_size(input_len, config).unwrap();
+            let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
 
             assert_eq!(
                 encoded_size,
-                encode_config_slice(&orig_data, config, &mut encoded_data)
+                engine.encode_slice(&orig_data, &mut encoded_data).unwrap()
             );
 
             assert_encode_sanity(
-                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
-                config,
+                str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
+                engine.config().encode_padding(),
                 input_len,
             );
 
@@ -495,50 +342,9 @@ mod tests {
                 &encoded_data_original_state[encoded_size..]
             );
 
-            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
-            assert_eq!(orig_data, decoded);
-        }
-    }
-
-    #[test]
-    fn encode_config_slice_fits_into_precisely_sized_slice() {
-        let mut orig_data = Vec::new();
-        let mut encoded_data = Vec::new();
-        let mut decoded = Vec::new();
-
-        let input_len_range = Uniform::new(0, 1000);
-
-        let mut rng = rand::rngs::SmallRng::from_entropy();
-
-        for _ in 0..10_000 {
-            orig_data.clear();
-            encoded_data.clear();
-            decoded.clear();
-
-            let input_len = input_len_range.sample(&mut rng);
-
-            for _ in 0..input_len {
-                orig_data.push(rng.gen());
-            }
-
-            let config = random_config(&mut rng);
-
-            let encoded_size = encoded_size(input_len, config).unwrap();
-
-            encoded_data.resize(encoded_size, 0);
-
-            assert_eq!(
-                encoded_size,
-                encode_config_slice(&orig_data, config, &mut encoded_data)
-            );
-
-            assert_encode_sanity(
-                std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
-                config,
-                input_len,
-            );
-
-            decode_config_buf(&encoded_data[0..encoded_size], config, &mut decoded).unwrap();
+            engine
+                .decode_vec(&encoded_data[0..encoded_size], &mut decoded)
+                .unwrap();
             assert_eq!(orig_data, decoded);
         }
     }
@@ -563,17 +369,17 @@ mod tests {
             }
 
             let config = random_config(&mut rng);
+            let engine = random_engine(&mut rng);
 
             // fill up the output buffer with garbage
-            let encoded_size = encoded_size(input_len, config).unwrap();
+            let encoded_size = encoded_len(input_len, config.encode_padding()).unwrap();
             for _ in 0..encoded_size {
                 output.push(rng.gen());
             }
 
-            let orig_output_buf = output.to_vec();
+            let orig_output_buf = output.clone();
 
-            let bytes_written =
-                encode_to_slice(&input, &mut output, config.char_set.encode_table());
+            let bytes_written = engine.internal_encode(&input, &mut output);
 
             // make sure the part beyond bytes_written is the same garbage it was before
             assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
@@ -602,17 +408,17 @@ mod tests {
                 input.push(rng.gen());
             }
 
-            let config = random_config(&mut rng);
+            let engine = random_engine(&mut rng);
 
             // fill up the output buffer with garbage
-            let encoded_size = encoded_size(input_len, config).unwrap();
+            let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
             for _ in 0..encoded_size + 1000 {
                 output.push(rng.gen());
             }
 
-            let orig_output_buf = output.to_vec();
+            let orig_output_buf = output.clone();
 
-            encode_with_padding(&input, config, encoded_size, &mut output[0..encoded_size]);
+            encode_with_padding(&input, &mut output[0..encoded_size], &engine, encoded_size);
 
             // make sure the part beyond b64 is the same garbage it was before
             assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);
@@ -637,7 +443,7 @@ mod tests {
                 output.push(rng.gen());
             }
 
-            let orig_output_buf = output.to_vec();
+            let orig_output_buf = output.clone();
 
             let bytes_written = add_padding(input_len, &mut output);
 
@@ -649,8 +455,13 @@ mod tests {
         }
     }
 
-    fn assert_encoded_length(input_len: usize, encoded_len: usize, config: Config) {
-        assert_eq!(encoded_len, encoded_size(input_len, config).unwrap());
+    fn assert_encoded_length<E: Engine>(
+        input_len: usize,
+        enc_len: usize,
+        engine: &E,
+        padded: bool,
+    ) {
+        assert_eq!(enc_len, encoded_len(input_len, padded).unwrap());
 
         let mut bytes: Vec<u8> = Vec::new();
         let mut rng = rand::rngs::SmallRng::from_entropy();
@@ -659,17 +470,19 @@ mod tests {
             bytes.push(rng.gen());
         }
 
-        let encoded = encode_config(&bytes, config);
-        assert_encode_sanity(&encoded, config, input_len);
+        let encoded = engine.encode(&bytes);
+        assert_encode_sanity(&encoded, padded, input_len);
 
-        assert_eq!(encoded_len, encoded.len());
+        assert_eq!(enc_len, encoded.len());
     }
 
     #[test]
     fn encode_imap() {
         assert_eq!(
-            encode_config(b"\xFB\xFF", crate::IMAP_MUTF7),
-            encode_config(b"\xFB\xFF", crate::STANDARD_NO_PAD).replace("/", ",")
+            &GeneralPurpose::new(&alphabet::IMAP_MUTF7, NO_PAD).encode(b"\xFB\xFF"),
+            &GeneralPurpose::new(&alphabet::STANDARD, NO_PAD)
+                .encode(b"\xFB\xFF")
+                .replace('/', ",")
         );
     }
 }

+ 348 - 0
zeroidc/vendor/base64/src/engine/general_purpose/decode.rs

@@ -0,0 +1,348 @@
+use crate::{
+    engine::{general_purpose::INVALID_VALUE, DecodeEstimate, DecodePaddingMode},
+    DecodeError, PAD_BYTE,
+};
+
+// decode logic operates on chunks of 8 input bytes without padding
+const INPUT_CHUNK_LEN: usize = 8;
+const DECODED_CHUNK_LEN: usize = 6;
+
+// we read a u64 and write a u64, but a u64 of input only yields 6 bytes of output, so the last
+// 2 bytes of any output u64 should not be counted as written to (but must be available in a
+// slice).
+const DECODED_CHUNK_SUFFIX: usize = 2;
+
+// how many u64's of input to handle at a time
+const CHUNKS_PER_FAST_LOOP_BLOCK: usize = 4;
+
+const INPUT_BLOCK_LEN: usize = CHUNKS_PER_FAST_LOOP_BLOCK * INPUT_CHUNK_LEN;
+
+// includes the trailing 2 bytes for the final u64 write
+const DECODED_BLOCK_LEN: usize =
+    CHUNKS_PER_FAST_LOOP_BLOCK * DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX;
+
+#[doc(hidden)]
+pub struct GeneralPurposeEstimate {
+    /// Total number of decode chunks, including a possibly partial last chunk
+    num_chunks: usize,
+    decoded_len_estimate: usize,
+}
+
+impl GeneralPurposeEstimate {
+    pub(crate) fn new(encoded_len: usize) -> Self {
+        Self {
+            num_chunks: encoded_len
+                .checked_add(INPUT_CHUNK_LEN - 1)
+                .expect("Overflow when calculating number of chunks in input")
+                / INPUT_CHUNK_LEN,
+            decoded_len_estimate: encoded_len
+                .checked_add(3)
+                .expect("Overflow when calculating decoded len estimate")
+                / 4
+                * 3,
+        }
+    }
+}
+
+impl DecodeEstimate for GeneralPurposeEstimate {
+    fn decoded_len_estimate(&self) -> usize {
+        self.decoded_len_estimate
+    }
+}
+
+/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
+/// Returns the number of bytes written, or an error.
+// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is
+// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment,
+// but this is fragile and the best setting changes with only minor code modifications.
+#[inline]
+pub(crate) fn decode_helper(
+    input: &[u8],
+    estimate: GeneralPurposeEstimate,
+    output: &mut [u8],
+    decode_table: &[u8; 256],
+    decode_allow_trailing_bits: bool,
+    padding_mode: DecodePaddingMode,
+) -> Result<usize, DecodeError> {
+    let remainder_len = input.len() % INPUT_CHUNK_LEN;
+
+    // Because the fast decode loop writes in groups of 8 bytes (unrolled to
+    // CHUNKS_PER_FAST_LOOP_BLOCK times 8 bytes, where possible) and outputs 8 bytes at a time (of
+    // which only 6 are valid data), we need to be sure that we stop using the fast decode loop
+    // soon enough that there will always be 2 more bytes of valid data written after that loop.
+    let trailing_bytes_to_skip = match remainder_len {
+        // if input is a multiple of the chunk size, ignore the last chunk as it may have padding,
+        // and the fast decode logic cannot handle padding
+        0 => INPUT_CHUNK_LEN,
+        // 1 and 5 trailing bytes are illegal: can't decode 6 bits of input into a byte
+        1 | 5 => {
+            // trailing whitespace is so common that it's worth it to check the last byte to
+            // possibly return a better error message
+            if let Some(b) = input.last() {
+                if *b != PAD_BYTE && decode_table[*b as usize] == INVALID_VALUE {
+                    return Err(DecodeError::InvalidByte(input.len() - 1, *b));
+                }
+            }
+
+            return Err(DecodeError::InvalidLength);
+        }
+        // This will decode to one output byte, which isn't enough to overwrite the 2 extra bytes
+        // written by the fast decode loop. So, we have to ignore both these 2 bytes and the
+        // previous chunk.
+        2 => INPUT_CHUNK_LEN + 2,
+        // If this is 3 un-padded chars, then it would actually decode to 2 bytes. However, if this
+        // is an erroneous 2 chars + 1 pad char that would decode to 1 byte, then it should fail
+        // with an error, not panic from going past the bounds of the output slice, so we let it
+        // use stage 3 + 4.
+        3 => INPUT_CHUNK_LEN + 3,
+        // This can also decode to one output byte because it may be 2 input chars + 2 padding
+        // chars, which would decode to 1 byte.
+        4 => INPUT_CHUNK_LEN + 4,
+        // Everything else is a legal decode len (given that we don't require padding), and will
+        // decode to at least 2 bytes of output.
+        _ => remainder_len,
+    };
+
+    // rounded up to include partial chunks
+    let mut remaining_chunks = estimate.num_chunks;
+
+    let mut input_index = 0;
+    let mut output_index = 0;
+
+    {
+        let length_of_fast_decode_chunks = input.len().saturating_sub(trailing_bytes_to_skip);
+
+        // Fast loop, stage 1
+        // manual unroll to CHUNKS_PER_FAST_LOOP_BLOCK of u64s to amortize slice bounds checks
+        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_BLOCK_LEN) {
+            while input_index <= max_start_index {
+                let input_slice = &input[input_index..(input_index + INPUT_BLOCK_LEN)];
+                let output_slice = &mut output[output_index..(output_index + DECODED_BLOCK_LEN)];
+
+                decode_chunk(
+                    &input_slice[0..],
+                    input_index,
+                    decode_table,
+                    &mut output_slice[0..],
+                )?;
+                decode_chunk(
+                    &input_slice[8..],
+                    input_index + 8,
+                    decode_table,
+                    &mut output_slice[6..],
+                )?;
+                decode_chunk(
+                    &input_slice[16..],
+                    input_index + 16,
+                    decode_table,
+                    &mut output_slice[12..],
+                )?;
+                decode_chunk(
+                    &input_slice[24..],
+                    input_index + 24,
+                    decode_table,
+                    &mut output_slice[18..],
+                )?;
+
+                input_index += INPUT_BLOCK_LEN;
+                output_index += DECODED_BLOCK_LEN - DECODED_CHUNK_SUFFIX;
+                remaining_chunks -= CHUNKS_PER_FAST_LOOP_BLOCK;
+            }
+        }
+
+        // Fast loop, stage 2 (aka still pretty fast loop)
+        // 8 bytes at a time for whatever we didn't do in stage 1.
+        if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_CHUNK_LEN) {
+            while input_index < max_start_index {
+                decode_chunk(
+                    &input[input_index..(input_index + INPUT_CHUNK_LEN)],
+                    input_index,
+                    decode_table,
+                    &mut output
+                        [output_index..(output_index + DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX)],
+                )?;
+
+                output_index += DECODED_CHUNK_LEN;
+                input_index += INPUT_CHUNK_LEN;
+                remaining_chunks -= 1;
+            }
+        }
+    }
+
+    // Stage 3
+    // If input length was such that a chunk had to be deferred until after the fast loop
+    // because decoding it would have produced 2 trailing bytes that wouldn't then be
+    // overwritten, we decode that chunk here. This way is slower but doesn't write the 2
+    // trailing bytes.
+    // However, we still need to avoid the last chunk (partial or complete) because it could
+    // have padding, so we always do 1 fewer to avoid the last chunk.
+    for _ in 1..remaining_chunks {
+        decode_chunk_precise(
+            &input[input_index..],
+            input_index,
+            decode_table,
+            &mut output[output_index..(output_index + DECODED_CHUNK_LEN)],
+        )?;
+
+        input_index += INPUT_CHUNK_LEN;
+        output_index += DECODED_CHUNK_LEN;
+    }
+
+    // always have one more (possibly partial) block of 8 input
+    debug_assert!(input.len() - input_index > 1 || input.is_empty());
+    debug_assert!(input.len() - input_index <= 8);
+
+    super::decode_suffix::decode_suffix(
+        input,
+        input_index,
+        output,
+        output_index,
+        decode_table,
+        decode_allow_trailing_bits,
+        padding_mode,
+    )
+}
+
+/// Decode 8 bytes of input into 6 bytes of output. 8 bytes of output will be written, but only the
+/// first 6 of those contain meaningful data.
+///
+/// `input` is the bytes to decode, of which the first 8 bytes will be processed.
+/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors
+/// accurately)
+/// `decode_table` is the lookup table for the particular base64 alphabet.
+/// `output` will have its first 8 bytes overwritten, of which only the first 6 are valid decoded
+/// data.
+// yes, really inline (worth 30-50% speedup)
+#[inline(always)]
+fn decode_chunk(
+    input: &[u8],
+    index_at_start_of_input: usize,
+    decode_table: &[u8; 256],
+    output: &mut [u8],
+) -> Result<(), DecodeError> {
+    let morsel = decode_table[input[0] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
+    }
+    let mut accum = (morsel as u64) << 58;
+
+    let morsel = decode_table[input[1] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 1,
+            input[1],
+        ));
+    }
+    accum |= (morsel as u64) << 52;
+
+    let morsel = decode_table[input[2] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 2,
+            input[2],
+        ));
+    }
+    accum |= (morsel as u64) << 46;
+
+    let morsel = decode_table[input[3] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 3,
+            input[3],
+        ));
+    }
+    accum |= (morsel as u64) << 40;
+
+    let morsel = decode_table[input[4] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 4,
+            input[4],
+        ));
+    }
+    accum |= (morsel as u64) << 34;
+
+    let morsel = decode_table[input[5] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 5,
+            input[5],
+        ));
+    }
+    accum |= (morsel as u64) << 28;
+
+    let morsel = decode_table[input[6] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 6,
+            input[6],
+        ));
+    }
+    accum |= (morsel as u64) << 22;
+
+    let morsel = decode_table[input[7] as usize];
+    if morsel == INVALID_VALUE {
+        return Err(DecodeError::InvalidByte(
+            index_at_start_of_input + 7,
+            input[7],
+        ));
+    }
+    accum |= (morsel as u64) << 16;
+
+    write_u64(output, accum);
+
+    Ok(())
+}
+
+/// Decode an 8-byte chunk, but only write the 6 bytes actually decoded instead of including 2
+/// trailing garbage bytes.
+#[inline]
+fn decode_chunk_precise(
+    input: &[u8],
+    index_at_start_of_input: usize,
+    decode_table: &[u8; 256],
+    output: &mut [u8],
+) -> Result<(), DecodeError> {
+    let mut tmp_buf = [0_u8; 8];
+
+    decode_chunk(
+        input,
+        index_at_start_of_input,
+        decode_table,
+        &mut tmp_buf[..],
+    )?;
+
+    output[0..6].copy_from_slice(&tmp_buf[0..6]);
+
+    Ok(())
+}
+
+#[inline]
+fn write_u64(output: &mut [u8], value: u64) {
+    output[..8].copy_from_slice(&value.to_be_bytes());
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::engine::general_purpose::STANDARD;
+
+    #[test]
+    fn decode_chunk_precise_writes_only_6_bytes() {
+        let input = b"Zm9vYmFy"; // "foobar"
+        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
+
+        decode_chunk_precise(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
+        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output);
+    }
+
+    #[test]
+    fn decode_chunk_writes_8_bytes() {
+        let input = b"Zm9vYmFy"; // "foobar"
+        let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
+
+        decode_chunk(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
+        assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 0, 0], &output);
+    }
+}

+ 161 - 0
zeroidc/vendor/base64/src/engine/general_purpose/decode_suffix.rs

@@ -0,0 +1,161 @@
+use crate::{
+    engine::{general_purpose::INVALID_VALUE, DecodePaddingMode},
+    DecodeError, PAD_BYTE,
+};
+
+/// Decode the last 1-8 bytes, checking for trailing set bits and padding per the provided
+/// parameters.
+///
+/// Returns the total number of bytes decoded, including the ones indicated as already written by
+/// `output_index`.
+pub(crate) fn decode_suffix(
+    input: &[u8],
+    input_index: usize,
+    output: &mut [u8],
+    mut output_index: usize,
+    decode_table: &[u8; 256],
+    decode_allow_trailing_bits: bool,
+    padding_mode: DecodePaddingMode,
+) -> Result<usize, DecodeError> {
+    // Decode any leftovers that aren't a complete input block of 8 bytes.
+    // Use a u64 as a stack-resident 8 byte buffer.
+    let mut leftover_bits: u64 = 0;
+    let mut morsels_in_leftover = 0;
+    let mut padding_bytes = 0;
+    let mut first_padding_index: usize = 0;
+    let mut last_symbol = 0_u8;
+    let start_of_leftovers = input_index;
+
+    for (i, &b) in input[start_of_leftovers..].iter().enumerate() {
+        // '=' padding
+        if b == PAD_BYTE {
+            // There can be bad padding bytes in a few ways:
+            // 1 - Padding with non-padding characters after it
+            // 2 - Padding after zero or one characters in the current quad (should only
+            //     be after 2 or 3 chars)
+            // 3 - More than two characters of padding. If 3 or 4 padding chars
+            //     are in the same quad, that implies it will be caught by #2.
+            //     If it spreads from one quad to another, it will be an invalid byte
+            //     in the first quad.
+            // 4 - Non-canonical padding -- 1 byte when it should be 2, etc.
+            //     Per config, non-canonical but still functional non- or partially-padded base64
+            //     may be treated as an error condition.
+
+            if i % 4 < 2 {
+                // Check for case #2.
+                let bad_padding_index = start_of_leftovers
+                    + if padding_bytes > 0 {
+                        // If we've already seen padding, report the first padding index.
+                        // This is to be consistent with the normal decode logic: it will report an
+                        // error on the first padding character (since it doesn't expect to see
+                        // anything but actual encoded data).
+                        // This could only happen if the padding started in the previous quad since
+                        // otherwise this case would have been hit at i % 4 == 0 if it was the same
+                        // quad.
+                        first_padding_index
+                    } else {
+                        // haven't seen padding before, just use where we are now
+                        i
+                    };
+                return Err(DecodeError::InvalidByte(bad_padding_index, b));
+            }
+
+            if padding_bytes == 0 {
+                first_padding_index = i;
+            }
+
+            padding_bytes += 1;
+            continue;
+        }
+
+        // Check for case #1.
+        // To make '=' handling consistent with the main loop, don't allow
+        // non-suffix '=' in trailing chunk either. Report error as first
+        // erroneous padding.
+        if padding_bytes > 0 {
+            return Err(DecodeError::InvalidByte(
+                start_of_leftovers + first_padding_index,
+                PAD_BYTE,
+            ));
+        }
+
+        last_symbol = b;
+
+        // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding.
+        // Pack the leftovers from left to right.
+        let shift = 64 - (morsels_in_leftover + 1) * 6;
+        let morsel = decode_table[b as usize];
+        if morsel == INVALID_VALUE {
+            return Err(DecodeError::InvalidByte(start_of_leftovers + i, b));
+        }
+
+        leftover_bits |= (morsel as u64) << shift;
+        morsels_in_leftover += 1;
+    }
+
+    match padding_mode {
+        DecodePaddingMode::Indifferent => { /* everything we care about was already checked */ }
+        DecodePaddingMode::RequireCanonical => {
+            if (padding_bytes + morsels_in_leftover) % 4 != 0 {
+                return Err(DecodeError::InvalidPadding);
+            }
+        }
+        DecodePaddingMode::RequireNone => {
+            if padding_bytes > 0 {
+                // check at the end to make sure we let the cases of padding that should be InvalidByte
+                // get hit
+                return Err(DecodeError::InvalidPadding);
+            }
+        }
+    }
+
+    // When encoding 1 trailing byte (e.g. 0xFF), 2 base64 bytes ("/w") are needed.
+    // / is the symbol for 63 (0x3F, bottom 6 bits all set) and w is 48 (0x30, top 2 bits
+    // of bottom 6 bits set).
+    // When decoding two symbols back to one trailing byte, any final symbol higher than
+    // w would still decode to the original byte because we only care about the top two
+    // bits in the bottom 6, but would be a non-canonical encoding. So, we calculate a
+    // mask based on how many bits are used for just the canonical encoding, and optionally
+    // error if any other bits are set. In the example of one encoded byte -> 2 symbols,
+    // 2 symbols can technically encode 12 bits, but the last 4 are non canonical, and
+    // useless since there are no more symbols to provide the necessary 4 additional bits
+    // to finish the second original byte.
+
+    let leftover_bits_ready_to_append = match morsels_in_leftover {
+        0 => 0,
+        2 => 8,
+        3 => 16,
+        4 => 24,
+        6 => 32,
+        7 => 40,
+        8 => 48,
+        // can also be detected as case #2 bad padding above
+        _ => unreachable!(
+            "Impossible: must only have 0 to 8 input bytes in last chunk, with no invalid lengths"
+        ),
+    };
+
+    // if there are bits set outside the bits we care about, last symbol encodes trailing bits that
+    // will not be included in the output
+    let mask = !0 >> leftover_bits_ready_to_append;
+    if !decode_allow_trailing_bits && (leftover_bits & mask) != 0 {
+        // last morsel is at `morsels_in_leftover` - 1
+        return Err(DecodeError::InvalidLastSymbol(
+            start_of_leftovers + morsels_in_leftover - 1,
+            last_symbol,
+        ));
+    }
+
+    // TODO benchmark simply converting to big endian bytes
+    let mut leftover_bits_appended_to_buf = 0;
+    while leftover_bits_appended_to_buf < leftover_bits_ready_to_append {
+        // `as` simply truncates the higher bits, which is what we want here
+        let selected_bits = (leftover_bits >> (56 - leftover_bits_appended_to_buf)) as u8;
+        output[output_index] = selected_bits;
+        output_index += 1;
+
+        leftover_bits_appended_to_buf += 8;
+    }
+
+    Ok(output_index)
+}

+ 349 - 0
zeroidc/vendor/base64/src/engine/general_purpose/mod.rs

@@ -0,0 +1,349 @@
+//! Provides the [GeneralPurpose] engine and associated config types.
+use crate::{
+    alphabet,
+    alphabet::Alphabet,
+    engine::{Config, DecodePaddingMode},
+    DecodeError,
+};
+use core::convert::TryInto;
+
+mod decode;
+pub(crate) mod decode_suffix;
+pub use decode::GeneralPurposeEstimate;
+
+pub(crate) const INVALID_VALUE: u8 = 255;
+
+/// A general-purpose base64 engine.
+///
+/// - It uses no vector CPU instructions, so it will work on any system.
+/// - It is reasonably fast (~2-3GiB/s).
+/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc, it is suggested to use the forthcoming constant-time implementation.
+pub struct GeneralPurpose {
+    encode_table: [u8; 64],
+    decode_table: [u8; 256],
+    config: GeneralPurposeConfig,
+}
+
+impl GeneralPurpose {
+    /// Create a `GeneralPurpose` engine from an [Alphabet].
+    ///
+    /// While not very expensive to initialize, ideally these should be cached
+    /// if the engine will be used repeatedly.
+    pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self {
+        Self {
+            encode_table: encode_table(alphabet),
+            decode_table: decode_table(alphabet),
+            config,
+        }
+    }
+}
+
+impl super::Engine for GeneralPurpose {
+    type Config = GeneralPurposeConfig;
+    type DecodeEstimate = GeneralPurposeEstimate;
+
+    fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
+        let mut input_index: usize = 0;
+
+        const BLOCKS_PER_FAST_LOOP: usize = 4;
+        const LOW_SIX_BITS: u64 = 0x3F;
+
+        // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
+        // 2 trailing bytes to be available to read.
+        let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
+        let mut output_index = 0;
+
+        if last_fast_index > 0 {
+            while input_index <= last_fast_index {
+                // Major performance wins from letting the optimizer do the bounds check once, mostly
+                // on the output side
+                let input_chunk =
+                    &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
+                let output_chunk =
+                    &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];
+
+                // Hand-unrolling for 32 vs 16 or 8 bytes yields performance about equivalent
+                // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
+                // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
+                // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
+                // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
+                // Plus, single-digit percentage performance differences might well be quite different
+                // on different hardware.
+
+                let input_u64 = read_u64(&input_chunk[0..]);
+
+                output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+                output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+                output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+                output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+                output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+                output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+                output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+                output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+                let input_u64 = read_u64(&input_chunk[6..]);
+
+                output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+                output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+                output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+                output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+                output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+                output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+                output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+                output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+                let input_u64 = read_u64(&input_chunk[12..]);
+
+                output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+                output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+                output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+                output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+                output_chunk[20] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+                output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+                output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+                output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+                let input_u64 = read_u64(&input_chunk[18..]);
+
+                output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
+                output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
+                output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
+                output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
+                output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
+                output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
+                output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
+                output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
+
+                output_index += BLOCKS_PER_FAST_LOOP * 8;
+                input_index += BLOCKS_PER_FAST_LOOP * 6;
+            }
+        }
+
+        // Encode what's left after the fast loop.
+
+        const LOW_SIX_BITS_U8: u8 = 0x3F;
+
+        let rem = input.len() % 3;
+        let start_of_rem = input.len() - rem;
+
+        // start at the first index not handled by fast loop, which may be 0.
+
+        while input_index < start_of_rem {
+            let input_chunk = &input[input_index..(input_index + 3)];
+            let output_chunk = &mut output[output_index..(output_index + 4)];
+
+            output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize];
+            output_chunk[1] = self.encode_table
+                [((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
+            output_chunk[2] = self.encode_table
+                [((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
+            output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];
+
+            input_index += 3;
+            output_index += 4;
+        }
+
+        if rem == 2 {
+            output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
+            output[output_index + 1] =
+                self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4)
+                    & LOW_SIX_BITS_U8) as usize];
+            output[output_index + 2] =
+                self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
+            output_index += 3;
+        } else if rem == 1 {
+            output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
+            output[output_index + 1] =
+                self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
+            output_index += 2;
+        }
+
+        output_index
+    }
+
+    fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
+        GeneralPurposeEstimate::new(input_len)
+    }
+
+    fn internal_decode(
+        &self,
+        input: &[u8],
+        output: &mut [u8],
+        estimate: Self::DecodeEstimate,
+    ) -> Result<usize, DecodeError> {
+        decode::decode_helper(
+            input,
+            estimate,
+            output,
+            &self.decode_table,
+            self.config.decode_allow_trailing_bits,
+            self.config.decode_padding_mode,
+        )
+    }
+
+    fn config(&self) -> &Self::Config {
+        &self.config
+    }
+}
+
+/// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index
+pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] {
+    // the encode table is just the alphabet:
+    // 6-bit index lookup -> printable byte
+    let mut encode_table = [0_u8; 64];
+    {
+        let mut index = 0;
+        while index < 64 {
+            encode_table[index] = alphabet.symbols[index];
+            index += 1;
+        }
+    }
+
+    encode_table
+}
+
+/// Returns a table mapping base64 bytes as the lookup index to either:
+/// - [INVALID_VALUE] for bytes that aren't members of the alphabet
+/// - a byte whose lower 6 bits are the value that was encoded into the index byte
+pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] {
+    let mut decode_table = [INVALID_VALUE; 256];
+
+    // Since the table is full of `INVALID_VALUE` already, we only need to overwrite
+    // the parts that are valid.
+    let mut index = 0;
+    while index < 64 {
+        // The index in the alphabet is the 6-bit value we care about.
+        // Since the index is in 0-63, it is safe to cast to u8.
+        decode_table[alphabet.symbols[index] as usize] = index as u8;
+        index += 1;
+    }
+
+    decode_table
+}
+
+#[inline]
+fn read_u64(s: &[u8]) -> u64 {
+    u64::from_be_bytes(s[..8].try_into().unwrap())
+}
+
+/// Contains configuration parameters for base64 encoding and decoding.
+///
+/// ```
+/// # use base64::engine::GeneralPurposeConfig;
+/// let config = GeneralPurposeConfig::new()
+///     .with_encode_padding(false);
+///     // further customize using `.with_*` methods as needed
+/// ```
+///
+/// The constants [PAD] and [NO_PAD] cover most use cases.
+///
+/// To specify the characters used, see [Alphabet].
+#[derive(Clone, Copy, Debug)]
+pub struct GeneralPurposeConfig {
+    encode_padding: bool,
+    decode_allow_trailing_bits: bool,
+    decode_padding_mode: DecodePaddingMode,
+}
+
+impl GeneralPurposeConfig {
+    /// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and
+    /// `decode_padding_mode = DecodePaddingMode::RequireCanonicalPadding`.
+    ///
+    /// This probably matches most people's expectations, but consider disabling padding to save
+    /// a few bytes unless you specifically need it for compatibility with some legacy system.
+    pub const fn new() -> Self {
+        Self {
+            // RFC states that padding must be applied by default
+            encode_padding: true,
+            decode_allow_trailing_bits: false,
+            decode_padding_mode: DecodePaddingMode::RequireCanonical,
+        }
+    }
+
+    /// Create a new config based on `self` with an updated `padding` setting.
+    ///
+    /// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed
+    /// to produce an output whose length is a multiple of 4.
+    ///
+    /// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the
+    /// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2).
+    ///
+    /// For new applications, consider not using padding if the decoders you're using don't require
+    /// padding to be present.
+    pub const fn with_encode_padding(self, padding: bool) -> Self {
+        Self {
+            encode_padding: padding,
+            ..self
+        }
+    }
+
+    /// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting.
+    ///
+    /// Most users will not need to configure this. It's useful if you need to decode base64
+    /// produced by a buggy encoder that has bits set in the unused space on the last base64
+    /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
+    /// If invalid trailing bits are present and this is `true`, those bits will
+    /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted.
+    pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self {
+        Self {
+            decode_allow_trailing_bits: allow,
+            ..self
+        }
+    }
+
+    /// Create a new config based on `self` with an updated `decode_padding_mode` setting.
+    ///
+    /// Padding is not useful in terms of representing encoded data -- it makes no difference to
+    /// the decoder if padding is present or not, so if you have some un-padded input to decode, it
+    /// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being
+    /// emitted.
+    ///
+    /// However, since in practice
+    /// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361),
+    /// the default setting is the stricter `DecodePaddingMode::RequireCanonicalPadding`.
+    ///
+    /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the
+    /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`.
+    pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self {
+        Self {
+            decode_padding_mode: mode,
+            ..self
+        }
+    }
+}
+
+impl Default for GeneralPurposeConfig {
+    /// Delegates to [GeneralPurposeConfig::new].
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl Config for GeneralPurposeConfig {
+    fn encode_padding(&self) -> bool {
+        self.encode_padding
+    }
+}
+
+/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config.
+pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD);
+
+/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config.
+pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
+
+/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config.
+pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD);
+
+/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config.
+pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
+
+/// Include padding bytes when encoding, and require that they be present when decoding.
+///
+/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves
+/// little purpose in practice.
+pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new();
+
+/// Don't add padding when encoding, and require no padding when decoding.
+pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new()
+    .with_encode_padding(false)
+    .with_decode_padding_mode(DecodePaddingMode::RequireNone);

+ 410 - 0
zeroidc/vendor/base64/src/engine/mod.rs

@@ -0,0 +1,410 @@
+//! Provides the [Engine] abstraction and out of the box implementations.
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use crate::chunked_encoder;
+use crate::{
+    encode::{encode_with_padding, EncodeSliceError},
+    encoded_len, DecodeError, DecodeSliceError,
+};
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use alloc::vec::Vec;
+
+#[cfg(any(feature = "alloc", feature = "std", test))]
+use alloc::{string::String, vec};
+
+pub mod general_purpose;
+
+#[cfg(test)]
+mod naive;
+
+#[cfg(test)]
+mod tests;
+
+pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig};
+
+/// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this.
+///
+/// Different implementations offer different characteristics. The library currently ships with
+/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices
+/// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed.
+///
+/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's
+/// recommended to store the engine in a `const` so that references to it won't pose any lifetime
+/// issues, and to avoid repeating the cost of engine setup.
+///
+/// Since almost nobody will need to implement `Engine`, docs for internal methods are hidden.
+// When adding an implementation of Engine, include them in the engine test suite:
+// - add an implementation of [engine::tests::EngineWrapper]
+// - add the implementation to the `all_engines` macro
+// All tests run on all engines listed in the macro.
+pub trait Engine: Send + Sync {
+    /// The config type used by this engine
+    type Config: Config;
+    /// The decode estimate used by this engine
+    type DecodeEstimate: DecodeEstimate;
+
+    /// This is not meant to be called directly; it is only for `Engine` implementors.
+    /// See the other `encode*` functions on this trait.
+    ///
+    /// Encode the `input` bytes into the `output` buffer based on the mapping in `encode_table`.
+    ///
+    /// `output` will be long enough to hold the encoded data.
+    ///
+    /// Returns the number of bytes written.
+    ///
+    /// No padding should be written; that is handled separately.
+    ///
+    /// Must not write any bytes into the output slice other than the encoded data.
+    #[doc(hidden)]
+    fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize;
+
+    /// This is not meant to be called directly; it is only for `Engine` implementors.
+    ///
+    /// As an optimization to prevent the decoded length from being calculated twice, it is
+    /// sometimes helpful to have a conservative estimate of the decoded size before doing the
+    /// decoding, so this calculation is done separately and passed to [Engine::decode()] as needed.
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    #[doc(hidden)]
+    fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate;
+
+    /// This is not meant to be called directly; it is only for `Engine` implementors.
+    /// See the other `decode*` functions on this trait.
+    ///
+    /// Decode `input` base64 bytes into the `output` buffer.
+    ///
+    /// `decode_estimate` is the result of [Engine::internal_decoded_len_estimate()], which is passed in to avoid
+    /// calculating it again (expensive on short inputs).
+    ///
+    /// Returns the number of bytes written to `output`.
+    ///
+    /// Each complete 4-byte chunk of encoded data decodes to 3 bytes of decoded data, but this
+    /// function must also handle the final possibly partial chunk.
+    /// If the input length is not a multiple of 4, or uses padding bytes to reach a multiple of 4,
+    /// the trailing 2 or 3 bytes must decode to 1 or 2 bytes, respectively, as per the
+    /// [RFC](https://tools.ietf.org/html/rfc4648#section-3.5).
+    ///
+    /// Decoding must not write any bytes into the output slice other than the decoded data.
+    ///
+    /// Non-canonical trailing bits in the final tokens or non-canonical padding must be reported as
+    /// errors unless the engine is configured otherwise.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `output` is too small.
+    #[doc(hidden)]
+    fn internal_decode(
+        &self,
+        input: &[u8],
+        output: &mut [u8],
+        decode_estimate: Self::DecodeEstimate,
+    ) -> Result<usize, DecodeError>;
+
+    /// Returns the config for this engine.
+    fn config(&self) -> &Self::Config;
+
+    /// Encode arbitrary octets as base64 using the provided `Engine`.
+    /// Returns a `String`.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
+    ///
+    /// let b64 = general_purpose::STANDARD.encode(b"hello world~");
+    /// println!("{}", b64);
+    ///
+    /// const CUSTOM_ENGINE: engine::GeneralPurpose =
+    ///     engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
+    ///
+    /// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~");
+    /// ```
+    #[cfg(any(feature = "alloc", feature = "std", test))]
+    fn encode<T: AsRef<[u8]>>(&self, input: T) -> String {
+        let encoded_size = encoded_len(input.as_ref().len(), self.config().encode_padding())
+            .expect("integer overflow when calculating buffer size");
+        let mut buf = vec![0; encoded_size];
+
+        encode_with_padding(input.as_ref(), &mut buf[..], self, encoded_size);
+
+        String::from_utf8(buf).expect("Invalid UTF8")
+    }
+
+    /// Encode arbitrary octets as base64 into a supplied `String`.
+    /// Writes into the supplied `String`, which may allocate if its internal buffer isn't big enough.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
+    /// const CUSTOM_ENGINE: engine::GeneralPurpose =
+    ///     engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
+    ///
+    /// fn main() {
+    ///     let mut buf = String::new();
+    ///     general_purpose::STANDARD.encode_string(b"hello world~", &mut buf);
+    ///     println!("{}", buf);
+    ///
+    ///     buf.clear();
+    ///     CUSTOM_ENGINE.encode_string(b"hello internet~", &mut buf);
+    ///     println!("{}", buf);
+    /// }
+    /// ```
+    #[cfg(any(feature = "alloc", feature = "std", test))]
+    fn encode_string<T: AsRef<[u8]>>(&self, input: T, output_buf: &mut String) {
+        let input_bytes = input.as_ref();
+
+        {
+            let mut sink = chunked_encoder::StringSink::new(output_buf);
+
+            chunked_encoder::ChunkedEncoder::new(self)
+                .encode(input_bytes, &mut sink)
+                .expect("Writing to a String shouldn't fail");
+        }
+    }
+
+    /// Encode arbitrary octets as base64 into a supplied slice.
+    /// Writes into the supplied output buffer.
+    ///
+    /// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
+    /// or statically-allocated buffer).
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use base64::{Engine as _, engine::general_purpose};
+    /// let s = b"hello internet!";
+    /// let mut buf = Vec::new();
+    /// // make sure we'll have a slice big enough for base64 + padding
+    /// buf.resize(s.len() * 4 / 3 + 4, 0);
+    ///
+    /// let bytes_written = general_purpose::STANDARD.encode_slice(s, &mut buf).unwrap();
+    ///
+    /// // shorten our vec down to just what was written
+    /// buf.truncate(bytes_written);
+    ///
+    /// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice());
+    /// ```
+    fn encode_slice<T: AsRef<[u8]>>(
+        &self,
+        input: T,
+        output_buf: &mut [u8],
+    ) -> Result<usize, EncodeSliceError> {
+        let input_bytes = input.as_ref();
+
+        let encoded_size = encoded_len(input_bytes.len(), self.config().encode_padding())
+            .expect("usize overflow when calculating buffer size");
+
+        if output_buf.len() < encoded_size {
+            return Err(EncodeSliceError::OutputSliceTooSmall);
+        }
+
+        let b64_output = &mut output_buf[0..encoded_size];
+
+        encode_with_padding(input_bytes, b64_output, self, encoded_size);
+
+        Ok(encoded_size)
+    }
+
+    /// Decode from string reference as octets using the specified [Engine].
+    /// Returns a `Result` containing a `Vec<u8>`.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
+    ///
+    /// let bytes = general_purpose::STANDARD
+    ///     .decode("aGVsbG8gd29ybGR+Cg==").unwrap();
+    /// println!("{:?}", bytes);
+    ///
+    /// // custom engine setup
+    /// let bytes_url = engine::GeneralPurpose::new(
+    ///              &alphabet::URL_SAFE,
+    ///              general_purpose::NO_PAD)
+    ///     .decode("aGVsbG8gaW50ZXJuZXR-Cg").unwrap();
+    /// println!("{:?}", bytes_url);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    /// This would happen for sizes within a few bytes of the maximum value of `usize`.
+    #[cfg(any(feature = "alloc", feature = "std", test))]
+    fn decode<T: AsRef<[u8]>>(&self, input: T) -> Result<Vec<u8>, DecodeError> {
+        let input_bytes = input.as_ref();
+
+        let estimate = self.internal_decoded_len_estimate(input_bytes.len());
+        let mut buffer = vec![0; estimate.decoded_len_estimate()];
+
+        let bytes_written = self.internal_decode(input_bytes, &mut buffer, estimate)?;
+        buffer.truncate(bytes_written);
+
+        Ok(buffer)
+    }
+
+    /// Decode from string reference as octets.
+    /// Writes into the supplied `Vec`, which may allocate if its internal buffer isn't big enough.
+    /// Returns a `Result` containing an empty tuple, aka `()`.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
+    /// const CUSTOM_ENGINE: engine::GeneralPurpose =
+    ///     engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::PAD);
+    ///
+    /// fn main() {
+    ///     use base64::Engine;
+    ///     let mut buffer = Vec::<u8>::new();
+    ///     // with the default engine
+    ///     general_purpose::STANDARD
+    ///         .decode_vec("aGVsbG8gd29ybGR+Cg==", &mut buffer,).unwrap();
+    ///     println!("{:?}", buffer);
+    ///
+    ///     buffer.clear();
+    ///
+    ///     // with a custom engine
+    ///     CUSTOM_ENGINE.decode_vec(
+    ///         "aGVsbG8gaW50ZXJuZXR-Cg==",
+    ///         &mut buffer,
+    ///     ).unwrap();
+    ///     println!("{:?}", buffer);
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    /// This would happen for sizes within a few bytes of the maximum value of `usize`.
+    #[cfg(any(feature = "alloc", feature = "std", test))]
+    fn decode_vec<T: AsRef<[u8]>>(
+        &self,
+        input: T,
+        buffer: &mut Vec<u8>,
+    ) -> Result<(), DecodeError> {
+        let input_bytes = input.as_ref();
+
+        let starting_output_len = buffer.len();
+
+        let estimate = self.internal_decoded_len_estimate(input_bytes.len());
+        let total_len_estimate = estimate
+            .decoded_len_estimate()
+            .checked_add(starting_output_len)
+            .expect("Overflow when calculating output buffer length");
+        buffer.resize(total_len_estimate, 0);
+
+        let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
+        let bytes_written = self.internal_decode(input_bytes, buffer_slice, estimate)?;
+
+        buffer.truncate(starting_output_len + bytes_written);
+
+        Ok(())
+    }
+
+    /// Decode the input into the provided output slice.
+    ///
+    /// Returns an error if `output` is smaller than the estimated decoded length.
+    ///
+    /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
+    ///
+    /// See [crate::decoded_len_estimate] for calculating buffer sizes.
+    ///
+    /// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error
+    /// if the output buffer is too small.
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    /// This would happen for sizes within a few bytes of the maximum value of `usize`.
+    fn decode_slice<T: AsRef<[u8]>>(
+        &self,
+        input: T,
+        output: &mut [u8],
+    ) -> Result<usize, DecodeSliceError> {
+        let input_bytes = input.as_ref();
+
+        let estimate = self.internal_decoded_len_estimate(input_bytes.len());
+        if output.len() < estimate.decoded_len_estimate() {
+            return Err(DecodeSliceError::OutputSliceTooSmall);
+        }
+
+        self.internal_decode(input_bytes, output, estimate)
+            .map_err(|e| e.into())
+    }
+
+    /// Decode the input into the provided output slice.
+    ///
+    /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
+    ///
+    /// See [crate::decoded_len_estimate] for calculating buffer sizes.
+    ///
+    /// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output
+    /// buffer is too small.
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    /// This would happen for sizes within a few bytes of the maximum value of `usize`.
+    ///
+    /// Panics if the provided output buffer is too small for the decoded data.
+    fn decode_slice_unchecked<T: AsRef<[u8]>>(
+        &self,
+        input: T,
+        output: &mut [u8],
+    ) -> Result<usize, DecodeError> {
+        let input_bytes = input.as_ref();
+
+        self.internal_decode(
+            input_bytes,
+            output,
+            self.internal_decoded_len_estimate(input_bytes.len()),
+        )
+    }
+}
+
+/// The minimal level of configuration that engines must support.
+pub trait Config {
+    /// Returns `true` if padding should be added after the encoded output.
+    ///
+    /// Padding is added outside the engine's encode() since the engine may be used
+    /// to encode only a chunk of the overall output, so it can't always know when
+    /// the output is "done" and would therefore need padding (if configured).
+    // It could be provided as a separate parameter when encoding, but that feels like
+    // leaking an implementation detail to the user, and it's hopefully more convenient
+    // to have to only pass one thing (the engine) to any part of the API.
+    fn encode_padding(&self) -> bool;
+}
+
+/// The decode estimate used by an engine implementation. Users do not need to interact with this;
+/// it is only for engine implementors.
+///
+/// Implementors may store relevant data here when constructing this to avoid having to calculate
+/// them again during actual decoding.
+pub trait DecodeEstimate {
+    /// Returns a conservative (err on the side of too big) estimate of the decoded length to use
+    /// for pre-allocating buffers, etc.
+    ///
+    /// The estimate must be no larger than the next largest complete triple of decoded bytes.
+    /// That is, the final quad of tokens to decode may be assumed to be complete with no padding.
+    ///
+    /// # Panics
+    ///
+    /// Panics if decoded length estimation overflows.
+    /// This would happen for sizes within a few bytes of the maximum value of `usize`.
+    fn decoded_len_estimate(&self) -> usize;
+}
+
+/// Controls how pad bytes are handled when decoding.
+///
+/// Each [Engine] must support at least the behavior indicated by
+/// [DecodePaddingMode::RequireCanonical], and may support other modes.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DecodePaddingMode {
+    /// Canonical padding is allowed, but any fewer padding bytes than that is also allowed.
+    Indifferent,
+    /// Padding must be canonical (0, 1, or 2 `=` as needed to produce a 4 byte suffix).
+    RequireCanonical,
+    /// Padding must be absent -- for when you want predictable padding, without any wasted bytes.
+    RequireNone,
+}

+ 219 - 0
zeroidc/vendor/base64/src/engine/naive.rs

@@ -0,0 +1,219 @@
+use crate::{
+    alphabet::Alphabet,
+    engine::{
+        general_purpose::{self, decode_table, encode_table},
+        Config, DecodeEstimate, DecodePaddingMode, Engine,
+    },
+    DecodeError, PAD_BYTE,
+};
+use alloc::ops::BitOr;
+use std::ops::{BitAnd, Shl, Shr};
+
+/// Comparatively simple implementation that can be used as something to compare against in tests
+pub struct Naive {
+    encode_table: [u8; 64],
+    decode_table: [u8; 256],
+    config: NaiveConfig,
+}
+
+impl Naive {
+    const ENCODE_INPUT_CHUNK_SIZE: usize = 3;
+    const DECODE_INPUT_CHUNK_SIZE: usize = 4;
+
+    pub const fn new(alphabet: &Alphabet, config: NaiveConfig) -> Self {
+        Self {
+            encode_table: encode_table(alphabet),
+            decode_table: decode_table(alphabet),
+            config,
+        }
+    }
+
+    fn decode_byte_into_u32(&self, offset: usize, byte: u8) -> Result<u32, DecodeError> {
+        let decoded = self.decode_table[byte as usize];
+
+        if decoded == general_purpose::INVALID_VALUE {
+            return Err(DecodeError::InvalidByte(offset, byte));
+        }
+
+        Ok(decoded as u32)
+    }
+}
+
+impl Engine for Naive {
+    type Config = NaiveConfig;
+    type DecodeEstimate = NaiveEstimate;
+
+    /// Encodes `input` into `output` with no padding, returning the number of bytes written.
+    /// (Padding, if configured, is applied by the caller — see `Config::encode_padding`.)
+    fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
+        // complete chunks first
+
+        const LOW_SIX_BITS: u32 = 0x3F;
+
+        let rem = input.len() % Self::ENCODE_INPUT_CHUNK_SIZE;
+        // will never underflow
+        let complete_chunk_len = input.len() - rem;
+
+        let mut input_index = 0_usize;
+        let mut output_index = 0_usize;
+        // checked_sub is None when there are no complete chunks at all
+        if let Some(last_complete_chunk_index) =
+            complete_chunk_len.checked_sub(Self::ENCODE_INPUT_CHUNK_SIZE)
+        {
+            while input_index <= last_complete_chunk_index {
+                let chunk = &input[input_index..input_index + Self::ENCODE_INPUT_CHUNK_SIZE];
+
+                // populate low 24 bits from 3 bytes
+                let chunk_int: u32 =
+                    (chunk[0] as u32).shl(16) | (chunk[1] as u32).shl(8) | (chunk[2] as u32);
+                // encode 4x 6-bit output bytes
+                output[output_index] = self.encode_table[chunk_int.shr(18) as usize];
+                output[output_index + 1] =
+                    self.encode_table[chunk_int.shr(12_u8).bitand(LOW_SIX_BITS) as usize];
+                output[output_index + 2] =
+                    self.encode_table[chunk_int.shr(6_u8).bitand(LOW_SIX_BITS) as usize];
+                output[output_index + 3] =
+                    self.encode_table[chunk_int.bitand(LOW_SIX_BITS) as usize];
+
+                input_index += Self::ENCODE_INPUT_CHUNK_SIZE;
+                output_index += 4;
+            }
+        }
+
+        // then leftovers: 2 input bytes -> 3 symbols, 1 input byte -> 2 symbols
+        if rem == 2 {
+            let chunk = &input[input_index..input_index + 2];
+
+            // high six bits of chunk[0]
+            output[output_index] = self.encode_table[chunk[0].shr(2) as usize];
+            // bottom 2 bits of [0], high 4 bits of [1]
+            output[output_index + 1] =
+                self.encode_table[(chunk[0].shl(4_u8).bitor(chunk[1].shr(4_u8)) as u32)
+                    .bitand(LOW_SIX_BITS) as usize];
+            // bottom 4 bits of [1], with the 2 bottom bits as zero
+            output[output_index + 2] =
+                self.encode_table[(chunk[1].shl(2_u8) as u32).bitand(LOW_SIX_BITS) as usize];
+
+            output_index += 3;
+        } else if rem == 1 {
+            let byte = input[input_index];
+            output[output_index] = self.encode_table[byte.shr(2) as usize];
+            output[output_index + 1] =
+                self.encode_table[(byte.shl(4_u8) as u32).bitand(LOW_SIX_BITS) as usize];
+            output_index += 2;
+        }
+
+        output_index
+    }
+
+    /// Pre-computes the input length's chunk decomposition for use by `internal_decode`.
+    fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
+        NaiveEstimate::new(input_len)
+    }
+
+    /// Decodes `input` into `output`, returning the decoded length.
+    ///
+    /// All quads except the trailing one are decoded in the main loop; the trailing
+    /// (possibly padded) quad is delegated to `general_purpose::decode_suffix`, which also
+    /// enforces the configured padding mode and trailing-bits policy.
+    fn internal_decode(
+        &self,
+        input: &[u8],
+        output: &mut [u8],
+        estimate: Self::DecodeEstimate,
+    ) -> Result<usize, DecodeError> {
+        // a lone trailing symbol can never be valid base64
+        if estimate.rem == 1 {
+            // trailing whitespace is so common that it's worth it to check the last byte to
+            // possibly return a better error message
+            if let Some(b) = input.last() {
+                if *b != PAD_BYTE
+                    && self.decode_table[*b as usize] == general_purpose::INVALID_VALUE
+                {
+                    return Err(DecodeError::InvalidByte(input.len() - 1, *b));
+                }
+            }
+
+            return Err(DecodeError::InvalidLength);
+        }
+
+        let mut input_index = 0_usize;
+        let mut output_index = 0_usize;
+        const BOTTOM_BYTE: u32 = 0xFF;
+
+        // can only use the main loop on non-trailing chunks
+        if input.len() > Self::DECODE_INPUT_CHUNK_SIZE {
+            // skip the last chunk, whether it's partial or full, since it might
+            // have padding, and start at the beginning of the chunk before that
+            let last_complete_chunk_start_index = estimate.complete_chunk_len
+                - if estimate.rem == 0 {
+                    // Trailing chunk is also full chunk, so there must be at least 2 chunks, and
+                    // this won't underflow
+                    Self::DECODE_INPUT_CHUNK_SIZE * 2
+                } else {
+                    // Trailing chunk is partial, so it's already excluded in
+                    // complete_chunk_len
+                    Self::DECODE_INPUT_CHUNK_SIZE
+                };
+
+            while input_index <= last_complete_chunk_start_index {
+                let chunk = &input[input_index..input_index + Self::DECODE_INPUT_CHUNK_SIZE];
+                // reassemble 24 bits from 4x 6-bit symbols, failing on the first invalid byte
+                let decoded_int: u32 = self.decode_byte_into_u32(input_index, chunk[0])?.shl(18)
+                    | self
+                        .decode_byte_into_u32(input_index + 1, chunk[1])?
+                        .shl(12)
+                    | self.decode_byte_into_u32(input_index + 2, chunk[2])?.shl(6)
+                    | self.decode_byte_into_u32(input_index + 3, chunk[3])?;
+
+                // split the 24 bits back into 3 output bytes
+                output[output_index] = decoded_int.shr(16_u8).bitand(BOTTOM_BYTE) as u8;
+                output[output_index + 1] = decoded_int.shr(8_u8).bitand(BOTTOM_BYTE) as u8;
+                output[output_index + 2] = decoded_int.bitand(BOTTOM_BYTE) as u8;
+
+                input_index += Self::DECODE_INPUT_CHUNK_SIZE;
+                output_index += 3;
+            }
+        }
+
+        general_purpose::decode_suffix::decode_suffix(
+            input,
+            input_index,
+            output,
+            output_index,
+            &self.decode_table,
+            self.config.decode_allow_trailing_bits,
+            self.config.decode_padding_mode,
+        )
+    }
+
+    fn config(&self) -> &Self::Config {
+        &self.config
+    }
+}
+
+pub struct NaiveEstimate {
+    /// remainder from dividing input by `Naive::DECODE_INPUT_CHUNK_SIZE`
+    rem: usize,
+    /// Length of input that is in complete `Naive::DECODE_INPUT_CHUNK_SIZE`-length chunks
+    complete_chunk_len: usize,
+}
+
+impl NaiveEstimate {
+    /// Splits `input_len` into complete 4-symbol chunks plus remainder.
+    fn new(input_len: usize) -> Self {
+        let rem = input_len % Naive::DECODE_INPUT_CHUNK_SIZE;
+        let complete_chunk_len = input_len - rem;
+
+        Self {
+            rem,
+            complete_chunk_len,
+        }
+    }
+}
+
+impl DecodeEstimate for NaiveEstimate {
+    fn decoded_len_estimate(&self) -> usize {
+        // 3 decoded bytes per complete quad, plus one more triple if a partial chunk remains
+        ((self.complete_chunk_len / 4) + ((self.rem > 0) as usize)) * 3
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct NaiveConfig {
+    /// Whether padding should be appended when encoding (reported via `Config::encode_padding`).
+    pub encode_padding: bool,
+    /// Whether nonzero trailing bits in the final symbol are tolerated when decoding
+    /// (passed through to `decode_suffix`).
+    pub decode_allow_trailing_bits: bool,
+    /// How strictly `=` padding is validated when decoding.
+    pub decode_padding_mode: DecodePaddingMode,
+}
+
+impl Config for NaiveConfig {
+    fn encode_padding(&self) -> bool {
+        self.encode_padding
+    }
+}

+ 1430 - 0
zeroidc/vendor/base64/src/engine/tests.rs

@@ -0,0 +1,1430 @@
+// rstest_reuse template functions have unused variables
+#![allow(unused_variables)]
+
+use rand::{
+    self,
+    distributions::{self, Distribution as _},
+    rngs, Rng as _, SeedableRng as _,
+};
+use rstest::rstest;
+use rstest_reuse::{apply, template};
+use std::{collections, fmt};
+
+use crate::{
+    alphabet::{Alphabet, STANDARD},
+    encode::add_padding,
+    encoded_len,
+    engine::{general_purpose, naive, Config, DecodeEstimate, DecodePaddingMode, Engine},
+    tests::{assert_encode_sanity, random_alphabet, random_config},
+    DecodeError, PAD_BYTE,
+};
+
+// the case::foo syntax includes the "foo" in the generated test method names
+//
+// rstest_reuse template: every test annotated with #[apply(all_engines)] is instantiated
+// once per engine wrapper case listed here (general_purpose and naive).
+#[template]
+#[rstest(engine_wrapper,
+case::general_purpose(GeneralPurposeWrapper {}),
+case::naive(NaiveWrapper {}),
+)]
+fn all_engines<E: EngineWrapper>(engine_wrapper: E) {}
+
+// RFC 4648 test vectors ("f" -> "Zg==", etc.) round-tripped through both the padded and
+// unpadded standard-alphabet engines; also checks each engine rejects the other's padding style.
+#[apply(all_engines)]
+fn rfc_test_vectors_std_alphabet<E: EngineWrapper>(engine_wrapper: E) {
+    let data = vec![
+        ("", ""),
+        ("f", "Zg=="),
+        ("fo", "Zm8="),
+        ("foo", "Zm9v"),
+        ("foob", "Zm9vYg=="),
+        ("fooba", "Zm9vYmE="),
+        ("foobar", "Zm9vYmFy"),
+    ];
+
+    let engine = E::standard();
+    let engine_no_padding = E::standard_unpadded();
+
+    for (orig, encoded) in &data {
+        let encoded_without_padding = encoded.trim_end_matches('=');
+
+        // unpadded
+        {
+            let mut encode_buf = [0_u8; 8];
+            let mut decode_buf = [0_u8; 6];
+
+            let encode_len =
+                engine_no_padding.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
+            assert_eq!(
+                &encoded_without_padding,
+                &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
+            );
+            let decode_len = engine_no_padding
+                .decode_slice_unchecked(encoded_without_padding.as_bytes(), &mut decode_buf[..])
+                .unwrap();
+            assert_eq!(orig.len(), decode_len);
+
+            assert_eq!(
+                orig,
+                &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
+            );
+
+            // if there was any padding originally, the no padding engine won't decode it
+            if encoded.as_bytes().contains(&PAD_BYTE) {
+                assert_eq!(
+                    Err(DecodeError::InvalidPadding),
+                    engine_no_padding.decode(encoded)
+                )
+            }
+        }
+
+        // padded
+        {
+            let mut encode_buf = [0_u8; 8];
+            let mut decode_buf = [0_u8; 6];
+
+            let encode_len = engine.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
+            assert_eq!(
+                // doesn't have padding added yet
+                &encoded_without_padding,
+                &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
+            );
+            let pad_len = add_padding(orig.len(), &mut encode_buf[encode_len..]);
+            assert_eq!(encoded.as_bytes(), &encode_buf[..encode_len + pad_len]);
+
+            let decode_len = engine
+                .decode_slice_unchecked(encoded.as_bytes(), &mut decode_buf[..])
+                .unwrap();
+            assert_eq!(orig.len(), decode_len);
+
+            assert_eq!(
+                orig,
+                &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
+            );
+
+            // if there was (canonical) padding, and we remove it, the standard engine won't decode
+            if encoded.as_bytes().contains(&PAD_BYTE) {
+                assert_eq!(
+                    Err(DecodeError::InvalidPadding),
+                    engine.decode(encoded_without_padding)
+                )
+            }
+        }
+    }
+}
+
+// Randomly configured engines must round-trip random data: decode(encode(x)) == x,
+// with the decode buffer sized exactly to the original length.
+#[apply(all_engines)]
+fn roundtrip_random<E: EngineWrapper>(engine_wrapper: E) {
+    let mut rng = seeded_rng();
+
+    let mut orig_data = Vec::<u8>::new();
+    let mut encode_buf = Vec::<u8>::new();
+    let mut decode_buf = Vec::<u8>::new();
+
+    let len_range = distributions::Uniform::new(1, 1_000);
+
+    for _ in 0..10_000 {
+        let engine = E::random(&mut rng);
+
+        // buffers are reused across iterations to avoid re-allocating
+        orig_data.clear();
+        encode_buf.clear();
+        decode_buf.clear();
+
+        let (orig_len, _, encoded_len) = generate_random_encoded_data(
+            &engine,
+            &mut orig_data,
+            &mut encode_buf,
+            &mut rng,
+            &len_range,
+        );
+
+        // exactly the right size
+        decode_buf.resize(orig_len, 0);
+
+        let dec_len = engine
+            .decode_slice_unchecked(&encode_buf[0..encoded_len], &mut decode_buf[..])
+            .unwrap();
+
+        assert_eq!(orig_len, dec_len);
+        assert_eq!(&orig_data[..], &decode_buf[..dec_len]);
+    }
+}
+
+// internal_encode must write exactly the number of bytes it claims, leaving the random
+// prefix/suffix of an oversized buffer untouched, and the written bytes must decode back.
+#[apply(all_engines)]
+fn encode_doesnt_write_extra_bytes<E: EngineWrapper>(engine_wrapper: E) {
+    let mut rng = seeded_rng();
+
+    let mut orig_data = Vec::<u8>::new();
+    let mut encode_buf = Vec::<u8>::new();
+    let mut encode_buf_backup = Vec::<u8>::new();
+
+    let input_len_range = distributions::Uniform::new(0, 1000);
+
+    for _ in 0..10_000 {
+        let engine = E::random(&mut rng);
+        let padded = engine.config().encode_padding();
+
+        orig_data.clear();
+        encode_buf.clear();
+        encode_buf_backup.clear();
+
+        let orig_len = fill_rand(&mut orig_data, &mut rng, &input_len_range);
+
+        let prefix_len = 1024;
+        // plenty of prefix and suffix
+        fill_rand_len(&mut encode_buf, &mut rng, prefix_len * 2 + orig_len * 2);
+        // keep an untouched copy to compare the prefix/suffix against after encoding
+        encode_buf_backup.extend_from_slice(&encode_buf[..]);
+
+        let expected_encode_len_no_pad = encoded_len(orig_len, false).unwrap();
+
+        let encoded_len_no_pad =
+            engine.internal_encode(&orig_data[..], &mut encode_buf[prefix_len..]);
+        assert_eq!(expected_encode_len_no_pad, encoded_len_no_pad);
+
+        // no writes past what it claimed to write
+        assert_eq!(&encode_buf_backup[..prefix_len], &encode_buf[..prefix_len]);
+        assert_eq!(
+            &encode_buf_backup[(prefix_len + encoded_len_no_pad)..],
+            &encode_buf[(prefix_len + encoded_len_no_pad)..]
+        );
+
+        let encoded_data = &encode_buf[prefix_len..(prefix_len + encoded_len_no_pad)];
+        assert_encode_sanity(
+            std::str::from_utf8(encoded_data).unwrap(),
+            // engines don't pad
+            false,
+            orig_len,
+        );
+
+        // pad so we can decode it in case our random engine requires padding
+        let pad_len = if padded {
+            add_padding(orig_len, &mut encode_buf[prefix_len + encoded_len_no_pad..])
+        } else {
+            0
+        };
+
+        assert_eq!(
+            orig_data,
+            engine
+                .decode(&encode_buf[prefix_len..(prefix_len + encoded_len_no_pad + pad_len)],)
+                .unwrap()
+        );
+    }
+}
+
+// encode_slice output must fit exactly into a buffer sized by encoded_len(), and the
+// result must be sane and decodable for randomly configured engines.
+#[apply(all_engines)]
+fn encode_engine_slice_fits_into_precisely_sized_slice<E: EngineWrapper>(engine_wrapper: E) {
+    let mut orig_data = Vec::new();
+    let mut encoded_data = Vec::new();
+    let mut decoded = Vec::new();
+
+    let input_len_range = distributions::Uniform::new(0, 1000);
+
+    let mut rng = rngs::SmallRng::from_entropy();
+
+    for _ in 0..10_000 {
+        orig_data.clear();
+        encoded_data.clear();
+        decoded.clear();
+
+        let input_len = input_len_range.sample(&mut rng);
+
+        for _ in 0..input_len {
+            orig_data.push(rng.gen());
+        }
+
+        let engine = E::random(&mut rng);
+
+        // buffer sized precisely from encoded_len, honoring the engine's padding config
+        let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
+
+        encoded_data.resize(encoded_size, 0);
+
+        assert_eq!(
+            encoded_size,
+            engine.encode_slice(&orig_data, &mut encoded_data).unwrap()
+        );
+
+        assert_encode_sanity(
+            std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
+            engine.config().encode_padding(),
+            input_len,
+        );
+
+        engine
+            .decode_vec(&encoded_data[0..encoded_size], &mut decoded)
+            .unwrap();
+        assert_eq!(orig_data, decoded);
+    }
+}
+
+// decode_slice_unchecked must write only the decoded bytes into an oversized buffer,
+// leaving the random prefix and suffix byte-for-byte untouched.
+#[apply(all_engines)]
+fn decode_doesnt_write_extra_bytes<E>(engine_wrapper: E)
+where
+    E: EngineWrapper,
+    <<E as EngineWrapper>::Engine as Engine>::Config: fmt::Debug,
+{
+    let mut rng = seeded_rng();
+
+    let mut orig_data = Vec::<u8>::new();
+    let mut encode_buf = Vec::<u8>::new();
+    let mut decode_buf = Vec::<u8>::new();
+    let mut decode_buf_backup = Vec::<u8>::new();
+
+    let len_range = distributions::Uniform::new(1, 1_000);
+
+    for _ in 0..10_000 {
+        let engine = E::random(&mut rng);
+
+        orig_data.clear();
+        encode_buf.clear();
+        decode_buf.clear();
+        decode_buf_backup.clear();
+
+        let orig_len = fill_rand(&mut orig_data, &mut rng, &len_range);
+        encode_buf.resize(orig_len * 2 + 100, 0);
+
+        let encoded_len = engine
+            .encode_slice(&orig_data[..], &mut encode_buf[..])
+            .unwrap();
+        encode_buf.truncate(encoded_len);
+
+        // oversize decode buffer so we can easily tell if it writes anything more than
+        // just the decoded data
+        let prefix_len = 1024;
+        // plenty of prefix and suffix
+        fill_rand_len(&mut decode_buf, &mut rng, prefix_len * 2 + orig_len * 2);
+        decode_buf_backup.extend_from_slice(&decode_buf[..]);
+
+        let dec_len = engine
+            .decode_slice_unchecked(&encode_buf, &mut decode_buf[prefix_len..])
+            .unwrap();
+
+        assert_eq!(orig_len, dec_len);
+        assert_eq!(
+            &orig_data[..],
+            &decode_buf[prefix_len..prefix_len + dec_len]
+        );
+        // bytes outside [prefix_len, prefix_len + dec_len) must be untouched
+        assert_eq!(&decode_buf_backup[..prefix_len], &decode_buf[..prefix_len]);
+        assert_eq!(
+            &decode_buf_backup[prefix_len + dec_len..],
+            &decode_buf[prefix_len + dec_len..]
+        );
+    }
+}
+
+// Final symbols whose unused trailing bits are nonzero must be rejected as
+// InvalidLastSymbol at the correct offset, regardless of how many prefix quads precede them.
+#[apply(all_engines)]
+fn decode_detect_invalid_last_symbol<E: EngineWrapper>(engine_wrapper: E) {
+    // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol
+    let engine = E::standard();
+
+    assert_eq!(Ok(vec![0x89, 0x85]), engine.decode("iYU="));
+    assert_eq!(Ok(vec![0xFF]), engine.decode("/w=="));
+
+    for (suffix, offset) in vec![
+        // suffix, offset of bad byte from start of suffix
+        ("/x==", 1_usize),
+        ("/z==", 1_usize),
+        ("/0==", 1_usize),
+        ("/9==", 1_usize),
+        ("/+==", 1_usize),
+        ("//==", 1_usize),
+        // trailing 01
+        ("iYV=", 2_usize),
+        // trailing 10
+        ("iYW=", 2_usize),
+        // trailing 11
+        ("iYX=", 2_usize),
+    ] {
+        for prefix_quads in 0..256 {
+            let mut encoded = "AAAA".repeat(prefix_quads);
+            encoded.push_str(suffix);
+
+            assert_eq!(
+                Err(DecodeError::InvalidLastSymbol(
+                    encoded.len() - 4 + offset,
+                    suffix.as_bytes()[offset],
+                )),
+                engine.decode(encoded.as_str())
+            );
+        }
+    }
+}
+
+// When the length is invalid (4n+1), a valid last char yields InvalidLength but an
+// out-of-alphabet last char yields InvalidByte — the byte error takes precedence.
+#[apply(all_engines)]
+fn decode_detect_invalid_last_symbol_when_length_is_also_invalid<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    let mut rng = seeded_rng();
+
+    // check across enough lengths that it would likely cover any implementation's various internal
+    // small/large input division
+    for len in (0_usize..256).map(|len| len * 4 + 1) {
+        let engine = E::random_alphabet(&mut rng, &STANDARD);
+
+        let mut input = vec![b'A'; len];
+
+        // with a valid last char, it's InvalidLength
+        assert_eq!(Err(DecodeError::InvalidLength), engine.decode(&input));
+        // after mangling the last char, it's InvalidByte
+        input[len - 1] = b'"';
+        assert_eq!(
+            Err(DecodeError::InvalidByte(len - 1, b'"')),
+            engine.decode(&input)
+        );
+    }
+}
+
+// Exhaustive check of all 64x64 two-symbol suffixes ("XY=="): each either decodes to the
+// single byte it was built from, or is rejected as InvalidLastSymbol.
+#[apply(all_engines)]
+fn decode_detect_invalid_last_symbol_every_possible_two_symbols<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    let engine = E::standard();
+
+    // map every 1-byte input to its canonical 4-symbol (2 data + 2 pad) encoding
+    let mut base64_to_bytes = collections::HashMap::new();
+
+    for b in 0_u8..=255 {
+        let mut b64 = vec![0_u8; 4];
+        assert_eq!(2, engine.internal_encode(&[b], &mut b64[..]));
+        let _ = add_padding(1, &mut b64[2..]);
+
+        assert!(base64_to_bytes.insert(b64, vec![b]).is_none());
+    }
+
+    // every possible combination of trailing symbols must either decode to 1 byte or get InvalidLastSymbol, with or without any leading chunks
+
+    let mut prefix = Vec::new();
+    for _ in 0..256 {
+        let mut clone = prefix.clone();
+
+        let mut symbols = [0_u8; 4];
+        for &s1 in STANDARD.symbols.iter() {
+            symbols[0] = s1;
+            for &s2 in STANDARD.symbols.iter() {
+                symbols[1] = s2;
+                symbols[2] = PAD_BYTE;
+                symbols[3] = PAD_BYTE;
+
+                // chop off previous symbols
+                clone.truncate(prefix.len());
+                clone.extend_from_slice(&symbols[..]);
+                let decoded_prefix_len = prefix.len() / 4 * 3;
+
+                match base64_to_bytes.get(&symbols[..]) {
+                    Some(bytes) => {
+                        let res = engine
+                            .decode(&clone)
+                            // remove prefix
+                            .map(|decoded| decoded[decoded_prefix_len..].to_vec());
+
+                        assert_eq!(Ok(bytes.clone()), res);
+                    }
+                    None => assert_eq!(
+                        Err(DecodeError::InvalidLastSymbol(1, s2)),
+                        engine.decode(&symbols[..])
+                    ),
+                }
+            }
+        }
+
+        prefix.extend_from_slice(b"AAAA");
+    }
+}
+
+// Exhaustive check of all 64^3 three-symbol suffixes ("XYZ="): each either decodes to the
+// two bytes it was built from, or is rejected as InvalidLastSymbol.
+#[apply(all_engines)]
+fn decode_detect_invalid_last_symbol_every_possible_three_symbols<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    let engine = E::standard();
+
+    // map every 2-byte input to its canonical 4-symbol (3 data + 1 pad) encoding
+    let mut base64_to_bytes = collections::HashMap::new();
+
+    let mut bytes = [0_u8; 2];
+    for b1 in 0_u8..=255 {
+        bytes[0] = b1;
+        for b2 in 0_u8..=255 {
+            bytes[1] = b2;
+            let mut b64 = vec![0_u8; 4];
+            assert_eq!(3, engine.internal_encode(&bytes, &mut b64[..]));
+            let _ = add_padding(2, &mut b64[3..]);
+
+            let mut v = Vec::with_capacity(2);
+            v.extend_from_slice(&bytes[..]);
+
+            assert!(base64_to_bytes.insert(b64, v).is_none());
+        }
+    }
+
+    // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol, with or without any leading chunks
+
+    let mut prefix = Vec::new();
+    for _ in 0..256 {
+        let mut input = prefix.clone();
+
+        let mut symbols = [0_u8; 4];
+        for &s1 in STANDARD.symbols.iter() {
+            symbols[0] = s1;
+            for &s2 in STANDARD.symbols.iter() {
+                symbols[1] = s2;
+                for &s3 in STANDARD.symbols.iter() {
+                    symbols[2] = s3;
+                    symbols[3] = PAD_BYTE;
+
+                    // chop off previous symbols
+                    input.truncate(prefix.len());
+                    input.extend_from_slice(&symbols[..]);
+                    let decoded_prefix_len = prefix.len() / 4 * 3;
+
+                    match base64_to_bytes.get(&symbols[..]) {
+                        Some(bytes) => {
+                            let res = engine
+                                .decode(&input)
+                                // remove prefix
+                                .map(|decoded| decoded[decoded_prefix_len..].to_vec());
+
+                            assert_eq!(Ok(bytes.clone()), res);
+                        }
+                        None => assert_eq!(
+                            Err(DecodeError::InvalidLastSymbol(2, s3)),
+                            engine.decode(&symbols[..])
+                        ),
+                    }
+                }
+            }
+        }
+        prefix.extend_from_slice(b"AAAA");
+    }
+}
+
+// With decode_allow_trailing_bits enabled, suffixes with nonzero unused bits ("/x==", "iYV=",
+// etc.) decode as if those bits were zero; the strict engine accepts only the canonical forms.
+#[apply(all_engines)]
+fn decode_invalid_trailing_bits_ignored_when_configured<E: EngineWrapper>(engine_wrapper: E) {
+    let strict = E::standard();
+    let forgiving = E::standard_allow_trailing_bits();
+
+    // helper: decode `data` after `b64_prefix_len` bytes of prefix and compare just the suffix
+    fn assert_tolerant_decode<E: Engine>(
+        engine: &E,
+        input: &mut String,
+        b64_prefix_len: usize,
+        expected_decode_bytes: Vec<u8>,
+        data: &str,
+    ) {
+        let prefixed = prefixed_data(input, b64_prefix_len, data);
+        let decoded = engine.decode(prefixed);
+        // prefix is always complete chunks
+        let decoded_prefix_len = b64_prefix_len / 4 * 3;
+        assert_eq!(
+            Ok(expected_decode_bytes),
+            decoded.map(|v| v[decoded_prefix_len..].to_vec())
+        );
+    }
+
+    let mut prefix = String::new();
+    for _ in 0..256 {
+        let mut input = prefix.clone();
+
+        // example from https://github.com/marshallpierce/rust-base64/issues/75
+        assert!(strict
+            .decode(prefixed_data(&mut input, prefix.len(), "/w=="))
+            .is_ok());
+        assert!(strict
+            .decode(prefixed_data(&mut input, prefix.len(), "iYU="))
+            .is_ok());
+        // trailing 01
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/x==");
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYV=");
+        // trailing 10
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/y==");
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYW=");
+        // trailing 11
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/z==");
+        assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYX=");
+
+        prefix.push_str("AAAA");
+    }
+}
+
+// Corrupting a random position of valid encoded data with an out-of-alphabet byte must
+// produce InvalidByte with exactly that index and byte.
+#[apply(all_engines)]
+fn decode_invalid_byte_error<E: EngineWrapper>(engine_wrapper: E) {
+    let mut rng = seeded_rng();
+
+    let mut orig_data = Vec::<u8>::new();
+    let mut encode_buf = Vec::<u8>::new();
+    let mut decode_buf = Vec::<u8>::new();
+
+    let len_range = distributions::Uniform::new(1, 1_000);
+
+    for _ in 0..10_000 {
+        let alphabet = random_alphabet(&mut rng);
+        let engine = E::random_alphabet(&mut rng, alphabet);
+
+        orig_data.clear();
+        encode_buf.clear();
+        decode_buf.clear();
+
+        let (orig_len, encoded_len_just_data, encoded_len_with_padding) =
+            generate_random_encoded_data(
+                &engine,
+                &mut orig_data,
+                &mut encode_buf,
+                &mut rng,
+                &len_range,
+            );
+
+        // exactly the right size
+        decode_buf.resize(orig_len, 0);
+
+        // replace one encoded byte with an invalid byte
+        let invalid_byte: u8 = loop {
+            // rejection-sample until we draw a byte outside the alphabet
+            let byte: u8 = rng.gen();
+
+            if alphabet.symbols.contains(&byte) {
+                continue;
+            } else {
+                break byte;
+            }
+        };
+
+        let invalid_range = distributions::Uniform::new(0, orig_len);
+        let invalid_index = invalid_range.sample(&mut rng);
+        encode_buf[invalid_index] = invalid_byte;
+
+        assert_eq!(
+            Err(DecodeError::InvalidByte(invalid_index, invalid_byte)),
+            engine.decode_slice_unchecked(
+                &encode_buf[0..encoded_len_with_padding],
+                &mut decode_buf[..],
+            )
+        );
+    }
+}
+
+/// Any amount of padding anywhere before the final non padding character = invalid byte at first
+/// pad byte.
+/// From this, we know padding must extend to the end of the input.
+///
+/// A random interior range is overwritten with pad bytes; the reported error must point at the
+/// first overwritten position, under every padding mode.
+#[apply(all_engines)]
+fn decode_padding_before_final_non_padding_char_error_invalid_byte<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    let mut rng = seeded_rng();
+
+    // the different amounts of proper padding, w/ offset from end for the last non-padding char
+    let suffixes = vec![("/w==", 2), ("iYu=", 1), ("zzzz", 0)];
+
+    let prefix_quads_range = distributions::Uniform::from(0..=256);
+
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for _ in 0..100_000 {
+            for (suffix, offset) in suffixes.iter() {
+                let mut s = "ABCD".repeat(prefix_quads_range.sample(&mut rng));
+                s.push_str(suffix);
+                let mut encoded = s.into_bytes();
+
+                // calculate a range to write padding into that leaves at least one non padding char
+                let last_non_padding_offset = encoded.len() - 1 - offset;
+
+                // don't include last non padding char as it must stay not padding
+                let padding_end = rng.gen_range(0..last_non_padding_offset);
+
+                // don't use more than 100 bytes of padding, but also use shorter lengths when
+                // padding_end is near the start of the encoded data to avoid biasing to padding
+                // the entire prefix on short lengths
+                let padding_len = rng.gen_range(1..=usize::min(100, padding_end + 1));
+                let padding_start = padding_end.saturating_sub(padding_len);
+
+                encoded[padding_start..=padding_end].fill(PAD_BYTE);
+
+                assert_eq!(
+                    Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)),
+                    engine.decode(&encoded),
+                );
+            }
+        }
+    }
+}
+
+/// Any amount of padding before final chunk that crosses over into final chunk with 1-4 bytes =
+/// invalid byte at first pad byte (except for 1 byte suffix = invalid length).
+/// From this we know the padding must start in the final chunk.
+///
+/// Trailing padding is extended backwards past the final quad boundary; the error must name the
+/// first pad byte, under every padding mode.
+#[apply(all_engines)]
+fn decode_padding_starts_before_final_chunk_error_invalid_byte<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    let mut rng = seeded_rng();
+
+    // must have at least one prefix quad
+    let prefix_quads_range = distributions::Uniform::from(1..256);
+    // including 1 just to make sure that it really does produce invalid length
+    let suffix_pad_len_range = distributions::Uniform::from(1..=4);
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+        for _ in 0..100_000 {
+            let suffix_len = suffix_pad_len_range.sample(&mut rng);
+            let mut encoded = "ABCD"
+                .repeat(prefix_quads_range.sample(&mut rng))
+                .into_bytes();
+            encoded.resize(encoded.len() + suffix_len, PAD_BYTE);
+
+            // amount of padding must be long enough to extend back from suffix into previous
+            // quads
+            let padding_len = rng.gen_range(suffix_len + 1..encoded.len());
+            // no non-padding after padding in this test, so padding goes to the end
+            let padding_start = encoded.len() - padding_len;
+            encoded[padding_start..].fill(PAD_BYTE);
+
+            if suffix_len == 1 {
+                assert_eq!(Err(DecodeError::InvalidLength), engine.decode(&encoded),);
+            } else {
+                assert_eq!(
+                    Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)),
+                    engine.decode(&encoded),
+                );
+            }
+        }
+    }
+}
+
+/// 0-1 bytes of data before any amount of padding in final chunk = invalid byte, since padding
+/// is not valid data (consistent with error for pad bytes in earlier chunks).
+/// From this we know there must be 2-3 bytes of data before padding
+///
+/// The one exception: a total suffix length of 1 (one lone byte) reports InvalidLength instead.
+#[apply(all_engines)]
+fn decode_too_little_data_before_padding_error_invalid_byte<E: EngineWrapper>(engine_wrapper: E) {
+    let mut rng = seeded_rng();
+
+    // want to test no prefix quad case, so start at 0
+    let prefix_quads_range = distributions::Uniform::from(0_usize..256);
+    let suffix_data_len_range = distributions::Uniform::from(0_usize..=1);
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+        for _ in 0..100_000 {
+            let suffix_data_len = suffix_data_len_range.sample(&mut rng);
+            let prefix_quad_len = prefix_quads_range.sample(&mut rng);
+
+            // ensure there is a suffix quad
+            let min_padding = usize::from(suffix_data_len == 0);
+
+            // for all possible padding lengths
+            for padding_len in min_padding..=(4 - suffix_data_len) {
+                let mut encoded = "ABCD".repeat(prefix_quad_len).into_bytes();
+                encoded.resize(encoded.len() + suffix_data_len, b'A');
+                encoded.resize(encoded.len() + padding_len, PAD_BYTE);
+
+                if suffix_data_len + padding_len == 1 {
+                    assert_eq!(Err(DecodeError::InvalidLength), engine.decode(&encoded),);
+                } else {
+                    assert_eq!(
+                        Err(DecodeError::InvalidByte(
+                            prefix_quad_len * 4 + suffix_data_len,
+                            PAD_BYTE,
+                        )),
+                        engine.decode(&encoded),
+                        "suffix data len {} pad len {}",
+                        suffix_data_len,
+                        padding_len
+                    );
+                }
+            }
+        }
+    }
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 1
+#[apply(all_engines)]
+fn decode_malleability_test_case_3_byte_suffix_valid<E: EngineWrapper>(engine_wrapper: E) {
+    assert_eq!(
+        b"Hello".as_slice(),
+        &E::standard().decode("SGVsbG8=").unwrap()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 2
+#[apply(all_engines)]
+fn decode_malleability_test_case_3_byte_suffix_invalid_trailing_symbol<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    assert_eq!(
+        DecodeError::InvalidLastSymbol(6, 0x39),
+        E::standard().decode("SGVsbG9=").unwrap_err()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 3
+#[apply(all_engines)]
+fn decode_malleability_test_case_3_byte_suffix_no_padding<E: EngineWrapper>(engine_wrapper: E) {
+    assert_eq!(
+        DecodeError::InvalidPadding,
+        E::standard().decode("SGVsbG9").unwrap_err()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 4
+#[apply(all_engines)]
+fn decode_malleability_test_case_2_byte_suffix_valid_two_padding_symbols<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    assert_eq!(
+        b"Hell".as_slice(),
+        &E::standard().decode("SGVsbA==").unwrap()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 5
+#[apply(all_engines)]
+fn decode_malleability_test_case_2_byte_suffix_short_padding<E: EngineWrapper>(engine_wrapper: E) {
+    assert_eq!(
+        DecodeError::InvalidPadding,
+        E::standard().decode("SGVsbA=").unwrap_err()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 6
+#[apply(all_engines)]
+fn decode_malleability_test_case_2_byte_suffix_no_padding<E: EngineWrapper>(engine_wrapper: E) {
+    assert_eq!(
+        DecodeError::InvalidPadding,
+        E::standard().decode("SGVsbA").unwrap_err()
+    );
+}
+
+// https://eprint.iacr.org/2022/361.pdf table 2, test 7
+#[apply(all_engines)]
+fn decode_malleability_test_case_2_byte_suffix_too_much_padding<E: EngineWrapper>(
+    engine_wrapper: E,
+) {
+    assert_eq!(
+        DecodeError::InvalidByte(6, PAD_BYTE),
+        E::standard().decode("SGVsbA====").unwrap_err()
+    );
+}
+
+/// Requires canonical padding -> accepts 2 + 2, 3 + 1, 4 + 0 final quad configurations
+#[apply(all_engines)]
+fn decode_pad_mode_requires_canonical_accepts_canonical<E: EngineWrapper>(engine_wrapper: E) {
+    assert_all_suffixes_ok(
+        E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical),
+        vec!["/w==", "iYU=", "AAAA"],
+    );
+}
+
+/// Requires canonical padding -> rejects 2 + 0-1, 3 + 0 final chunk configurations
+#[apply(all_engines)]
+fn decode_pad_mode_requires_canonical_rejects_non_canonical<E: EngineWrapper>(engine_wrapper: E) {
+    let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical);
+
+    let suffixes = vec!["/w", "/w=", "iYU"];
+    for num_prefix_quads in 0..256 {
+        for &suffix in suffixes.iter() {
+            let mut encoded = "AAAA".repeat(num_prefix_quads);
+            encoded.push_str(suffix);
+
+            let res = engine.decode(&encoded);
+
+            assert_eq!(Err(DecodeError::InvalidPadding), res);
+        }
+    }
+}
+
+/// Requires no padding -> accepts 2 + 0, 3 + 0, 4 + 0 final chunk configuration
+#[apply(all_engines)]
+fn decode_pad_mode_requires_no_padding_accepts_no_padding<E: EngineWrapper>(engine_wrapper: E) {
+    assert_all_suffixes_ok(
+        E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone),
+        vec!["/w", "iYU", "AAAA"],
+    );
+}
+
+/// Requires no padding -> rejects 2 + 1-2, 3 + 1 final chunk configuration
+#[apply(all_engines)]
+fn decode_pad_mode_requires_no_padding_rejects_any_padding<E: EngineWrapper>(engine_wrapper: E) {
+    let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone);
+
+    let suffixes = vec!["/w=", "/w==", "iYU="];
+    for num_prefix_quads in 0..256 {
+        for &suffix in suffixes.iter() {
+            let mut encoded = "AAAA".repeat(num_prefix_quads);
+            encoded.push_str(suffix);
+
+            let res = engine.decode(&encoded);
+
+            assert_eq!(Err(DecodeError::InvalidPadding), res);
+        }
+    }
+}
+
+/// Indifferent padding accepts 2 + 0-2, 3 + 0-1, 4 + 0 final chunk configuration
+#[apply(all_engines)]
+fn decode_pad_mode_indifferent_padding_accepts_anything<E: EngineWrapper>(engine_wrapper: E) {
+    assert_all_suffixes_ok(
+        E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent),
+        vec!["/w", "/w=", "/w==", "iYU", "iYU=", "AAAA"],
+    );
+}
+
+//this is a MAY in the rfc: https://tools.ietf.org/html/rfc4648#section-3.3
+#[apply(all_engines)]
+fn decode_pad_byte_in_penultimate_quad_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            // leave room for at least one pad byte in penultimate quad
+            for num_valid_bytes_penultimate_quad in 0..4 {
+                // can't have 1 or it would be invalid length
+                for num_pad_bytes_in_final_quad in 2..=4 {
+                    let mut s: String = "ABCD".repeat(num_prefix_quads);
+
+                    // varying amounts of padding in the penultimate quad
+                    for _ in 0..num_valid_bytes_penultimate_quad {
+                        s.push('A');
+                    }
+                    // finish penultimate quad with padding
+                    for _ in num_valid_bytes_penultimate_quad..4 {
+                        s.push('=');
+                    }
+                    // and more padding in the final quad
+                    for _ in 0..num_pad_bytes_in_final_quad {
+                        s.push('=');
+                    }
+
+                    // padding should be an invalid byte before the final quad.
+                    // Could argue that the *next* padding byte (in the next quad) is technically the first
+                    // erroneous one, but reporting that accurately is more complex and probably nobody cares
+                    assert_eq!(
+                        DecodeError::InvalidByte(
+                            num_prefix_quads * 4 + num_valid_bytes_penultimate_quad,
+                            b'=',
+                        ),
+                        engine.decode(&s).unwrap_err()
+                    );
+                }
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_bytes_after_padding_in_final_quad_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            // leave at least one byte in the quad for padding
+            for bytes_after_padding in 1..4 {
+                let mut s: String = "ABCD".repeat(num_prefix_quads);
+
+                // every invalid padding position with a 3-byte final quad: 1 to 3 bytes after padding
+                for _ in 0..(3 - bytes_after_padding) {
+                    s.push('A');
+                }
+                s.push('=');
+                for _ in 0..bytes_after_padding {
+                    s.push('A');
+                }
+
+                // First (and only) padding byte is invalid.
+                assert_eq!(
+                    DecodeError::InvalidByte(
+                        num_prefix_quads * 4 + (3 - bytes_after_padding),
+                        b'='
+                    ),
+                    engine.decode(&s).unwrap_err()
+                );
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_absurd_pad_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            let mut s: String = "ABCD".repeat(num_prefix_quads);
+            s.push_str("==Y=Wx===pY=2U=====");
+
+            // first padding byte
+            assert_eq!(
+                DecodeError::InvalidByte(num_prefix_quads * 4, b'='),
+                engine.decode(&s).unwrap_err()
+            );
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_too_much_padding_returns_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            // add enough padding to ensure that we'll hit all decode stages at the different lengths
+            for pad_bytes in 1..=64 {
+                let mut s: String = "ABCD".repeat(num_prefix_quads);
+                let padding: String = "=".repeat(pad_bytes);
+                s.push_str(&padding);
+
+                if pad_bytes % 4 == 1 {
+                    assert_eq!(DecodeError::InvalidLength, engine.decode(&s).unwrap_err());
+                } else {
+                    assert_eq!(
+                        DecodeError::InvalidByte(num_prefix_quads * 4, b'='),
+                        engine.decode(&s).unwrap_err()
+                    );
+                }
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_padding_followed_by_non_padding_returns_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            for pad_bytes in 0..=32 {
+                let mut s: String = "ABCD".repeat(num_prefix_quads);
+                let padding: String = "=".repeat(pad_bytes);
+                s.push_str(&padding);
+                s.push('E');
+
+                if pad_bytes % 4 == 0 {
+                    assert_eq!(DecodeError::InvalidLength, engine.decode(&s).unwrap_err());
+                } else {
+                    assert_eq!(
+                        DecodeError::InvalidByte(num_prefix_quads * 4, b'='),
+                        engine.decode(&s).unwrap_err()
+                    );
+                }
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_one_char_in_final_quad_with_padding_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            let mut s: String = "ABCD".repeat(num_prefix_quads);
+            s.push_str("E=");
+
+            assert_eq!(
+                DecodeError::InvalidByte(num_prefix_quads * 4 + 1, b'='),
+                engine.decode(&s).unwrap_err()
+            );
+
+            // more padding doesn't change the error
+            s.push('=');
+            assert_eq!(
+                DecodeError::InvalidByte(num_prefix_quads * 4 + 1, b'='),
+                engine.decode(&s).unwrap_err()
+            );
+
+            s.push('=');
+            assert_eq!(
+                DecodeError::InvalidByte(num_prefix_quads * 4 + 1, b'='),
+                engine.decode(&s).unwrap_err()
+            );
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_too_few_symbols_in_final_quad_error<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            // <2 is invalid
+            for final_quad_symbols in 0..2 {
+                for padding_symbols in 0..=(4 - final_quad_symbols) {
+                    let mut s: String = "ABCD".repeat(num_prefix_quads);
+
+                    for _ in 0..final_quad_symbols {
+                        s.push('A');
+                    }
+                    for _ in 0..padding_symbols {
+                        s.push('=');
+                    }
+
+                    match final_quad_symbols + padding_symbols {
+                        0 => continue,
+                        1 => {
+                            assert_eq!(DecodeError::InvalidLength, engine.decode(&s).unwrap_err());
+                        }
+                        _ => {
+                            // error reported at first padding byte
+                            assert_eq!(
+                                DecodeError::InvalidByte(
+                                    num_prefix_quads * 4 + final_quad_symbols,
+                                    b'=',
+                                ),
+                                engine.decode(&s).unwrap_err()
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_invalid_trailing_bytes<E: EngineWrapper>(engine_wrapper: E) {
+    for mode in all_pad_modes() {
+        // we don't encode so we don't care about encode padding
+        let engine = E::standard_with_pad_mode(true, mode);
+
+        for num_prefix_quads in 0..256 {
+            let mut s: String = "ABCD".repeat(num_prefix_quads);
+            s.push_str("Cg==\n");
+
+            // The case of trailing newlines is common enough to warrant a test for a good error
+            // message.
+            assert_eq!(
+                Err(DecodeError::InvalidByte(num_prefix_quads * 4 + 4, b'\n')),
+                engine.decode(&s)
+            );
+
+            // extra padding, however, is still InvalidLength
+            let s = s.replace('\n', "=");
+            assert_eq!(Err(DecodeError::InvalidLength), engine.decode(s));
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_wrong_length_error<E: EngineWrapper>(engine_wrapper: E) {
+    let engine = E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent);
+
+    for num_prefix_quads in 0..256 {
+        // at least one token, otherwise it wouldn't be a final quad
+        for num_tokens_final_quad in 1..=4 {
+            for num_padding in 0..=(4 - num_tokens_final_quad) {
+                let mut s: String = "IIII".repeat(num_prefix_quads);
+                for _ in 0..num_tokens_final_quad {
+                    s.push('g');
+                }
+                for _ in 0..num_padding {
+                    s.push('=');
+                }
+
+                let res = engine.decode(&s);
+                if num_tokens_final_quad >= 2 {
+                    assert!(res.is_ok());
+                } else if num_tokens_final_quad == 1 && num_padding > 0 {
+                    // = is invalid if it's too early
+                    assert_eq!(
+                        Err(DecodeError::InvalidByte(
+                            num_prefix_quads * 4 + num_tokens_final_quad,
+                            61
+                        )),
+                        res
+                    );
+                } else if num_padding > 2 {
+                    assert_eq!(Err(DecodeError::InvalidPadding), res);
+                } else {
+                    assert_eq!(Err(DecodeError::InvalidLength), res);
+                }
+            }
+        }
+    }
+}
+
+#[apply(all_engines)]
+fn decode_into_slice_fits_in_precisely_sized_slice<E: EngineWrapper>(engine_wrapper: E) {
+    let mut orig_data = Vec::new();
+    let mut encoded_data = String::new();
+    let mut decode_buf = Vec::new();
+
+    let input_len_range = distributions::Uniform::new(0, 1000);
+    let mut rng = rngs::SmallRng::from_entropy();
+
+    for _ in 0..10_000 {
+        orig_data.clear();
+        encoded_data.clear();
+        decode_buf.clear();
+
+        let input_len = input_len_range.sample(&mut rng);
+
+        for _ in 0..input_len {
+            orig_data.push(rng.gen());
+        }
+
+        let engine = E::random(&mut rng);
+        engine.encode_string(&orig_data, &mut encoded_data);
+        assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
+
+        decode_buf.resize(input_len, 0);
+
+        // decode into the non-empty buf
+        let decode_bytes_written = engine
+            .decode_slice_unchecked(encoded_data.as_bytes(), &mut decode_buf[..])
+            .unwrap();
+
+        assert_eq!(orig_data.len(), decode_bytes_written);
+        assert_eq!(orig_data, decode_buf);
+    }
+}
+
+#[apply(all_engines)]
+fn decode_length_estimate_delta<E: EngineWrapper>(engine_wrapper: E) {
+    for engine in [E::standard(), E::standard_unpadded()] {
+        for &padding in &[true, false] {
+            for orig_len in 0..1000 {
+                let encoded_len = encoded_len(orig_len, padding).unwrap();
+
+                let decoded_estimate = engine
+                    .internal_decoded_len_estimate(encoded_len)
+                    .decoded_len_estimate();
+                assert!(decoded_estimate >= orig_len);
+                assert!(
+                    decoded_estimate - orig_len < 3,
+                    "estimate: {}, encoded: {}, orig: {}",
+                    decoded_estimate,
+                    encoded_len,
+                    orig_len
+                );
+            }
+        }
+    }
+}
+
+/// Returns a tuple of the original data length, the encoded data length (just data), and the length including padding.
+///
+/// Vecs provided should be empty.
+fn generate_random_encoded_data<E: Engine, R: rand::Rng, D: distributions::Distribution<usize>>(
+    engine: &E,
+    orig_data: &mut Vec<u8>,
+    encode_buf: &mut Vec<u8>,
+    rng: &mut R,
+    length_distribution: &D,
+) -> (usize, usize, usize) {
+    let padding: bool = engine.config().encode_padding();
+
+    let orig_len = fill_rand(orig_data, rng, length_distribution);
+    let expected_encoded_len = encoded_len(orig_len, padding).unwrap();
+    encode_buf.resize(expected_encoded_len, 0);
+
+    let base_encoded_len = engine.internal_encode(&orig_data[..], &mut encode_buf[..]);
+
+    let enc_len_with_padding = if padding {
+        base_encoded_len + add_padding(orig_len, &mut encode_buf[base_encoded_len..])
+    } else {
+        base_encoded_len
+    };
+
+    assert_eq!(expected_encoded_len, enc_len_with_padding);
+
+    (orig_len, base_encoded_len, enc_len_with_padding)
+}
+
+// fill to a random length
+fn fill_rand<R: rand::Rng, D: distributions::Distribution<usize>>(
+    vec: &mut Vec<u8>,
+    rng: &mut R,
+    length_distribution: &D,
+) -> usize {
+    let len = length_distribution.sample(rng);
+    for _ in 0..len {
+        vec.push(rng.gen());
+    }
+
+    len
+}
+
+fn fill_rand_len<R: rand::Rng>(vec: &mut Vec<u8>, rng: &mut R, len: usize) {
+    for _ in 0..len {
+        vec.push(rng.gen());
+    }
+}
+
+fn prefixed_data<'i, 'd>(
+    input_with_prefix: &'i mut String,
+    prefix_len: usize,
+    data: &'d str,
+) -> &'i str {
+    input_with_prefix.truncate(prefix_len);
+    input_with_prefix.push_str(data);
+    input_with_prefix.as_str()
+}
+
+/// A wrapper to make using engines in rstest fixtures easier.
+/// The functions don't need to be instance methods, but rstest does seem
+/// to want an instance, so instances are passed to test functions and then ignored.
+trait EngineWrapper {
+    type Engine: Engine;
+
+    /// Return an engine configured for RFC standard base64
+    fn standard() -> Self::Engine;
+
+    /// Return an engine configured for RFC standard base64, except with no padding appended on
+    /// encode, and required no padding on decode.
+    fn standard_unpadded() -> Self::Engine;
+
+    /// Return an engine configured for RFC standard alphabet with the provided encode and decode
+    /// pad settings
+    fn standard_with_pad_mode(encode_pad: bool, decode_pad_mode: DecodePaddingMode)
+        -> Self::Engine;
+
+    /// Return an engine configured for RFC standard base64 that allows invalid trailing bits
+    fn standard_allow_trailing_bits() -> Self::Engine;
+
+    /// Return an engine configured with a randomized alphabet and config
+    fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine;
+
+    /// Return an engine configured with the specified alphabet and randomized config
+    fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine;
+}
+
+struct GeneralPurposeWrapper {}
+
+impl EngineWrapper for GeneralPurposeWrapper {
+    type Engine = general_purpose::GeneralPurpose;
+
+    fn standard() -> Self::Engine {
+        general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::PAD)
+    }
+
+    fn standard_unpadded() -> Self::Engine {
+        general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::NO_PAD)
+    }
+
+    fn standard_with_pad_mode(
+        encode_pad: bool,
+        decode_pad_mode: DecodePaddingMode,
+    ) -> Self::Engine {
+        general_purpose::GeneralPurpose::new(
+            &STANDARD,
+            general_purpose::GeneralPurposeConfig::new()
+                .with_encode_padding(encode_pad)
+                .with_decode_padding_mode(decode_pad_mode),
+        )
+    }
+
+    fn standard_allow_trailing_bits() -> Self::Engine {
+        general_purpose::GeneralPurpose::new(
+            &STANDARD,
+            general_purpose::GeneralPurposeConfig::new().with_decode_allow_trailing_bits(true),
+        )
+    }
+
+    fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine {
+        let alphabet = random_alphabet(rng);
+
+        Self::random_alphabet(rng, alphabet)
+    }
+
+    fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine {
+        general_purpose::GeneralPurpose::new(alphabet, random_config(rng))
+    }
+}
+
+struct NaiveWrapper {}
+
+impl EngineWrapper for NaiveWrapper {
+    type Engine = naive::Naive;
+
+    fn standard() -> Self::Engine {
+        naive::Naive::new(
+            &STANDARD,
+            naive::NaiveConfig {
+                encode_padding: true,
+                decode_allow_trailing_bits: false,
+                decode_padding_mode: DecodePaddingMode::RequireCanonical,
+            },
+        )
+    }
+
+    fn standard_unpadded() -> Self::Engine {
+        naive::Naive::new(
+            &STANDARD,
+            naive::NaiveConfig {
+                encode_padding: false,
+                decode_allow_trailing_bits: false,
+                decode_padding_mode: DecodePaddingMode::RequireNone,
+            },
+        )
+    }
+
+    fn standard_with_pad_mode(
+        encode_pad: bool,
+        decode_pad_mode: DecodePaddingMode,
+    ) -> Self::Engine {
+        naive::Naive::new(
+            &STANDARD,
+            naive::NaiveConfig {
+                encode_padding: false,
+                decode_allow_trailing_bits: false,
+                decode_padding_mode: decode_pad_mode,
+            },
+        )
+    }
+
+    fn standard_allow_trailing_bits() -> Self::Engine {
+        naive::Naive::new(
+            &STANDARD,
+            naive::NaiveConfig {
+                encode_padding: true,
+                decode_allow_trailing_bits: true,
+                decode_padding_mode: DecodePaddingMode::RequireCanonical,
+            },
+        )
+    }
+
+    fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine {
+        let alphabet = random_alphabet(rng);
+
+        Self::random_alphabet(rng, alphabet)
+    }
+
+    fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine {
+        let mode = rng.gen();
+
+        let config = naive::NaiveConfig {
+            encode_padding: match mode {
+                DecodePaddingMode::Indifferent => rng.gen(),
+                DecodePaddingMode::RequireCanonical => true,
+                DecodePaddingMode::RequireNone => false,
+            },
+            decode_allow_trailing_bits: rng.gen(),
+            decode_padding_mode: mode,
+        };
+
+        naive::Naive::new(alphabet, config)
+    }
+}
+
+fn seeded_rng() -> impl rand::Rng {
+    rngs::SmallRng::from_entropy()
+}
+
+fn all_pad_modes() -> Vec<DecodePaddingMode> {
+    vec![
+        DecodePaddingMode::Indifferent,
+        DecodePaddingMode::RequireCanonical,
+        DecodePaddingMode::RequireNone,
+    ]
+}
+
+fn assert_all_suffixes_ok<E: Engine>(engine: E, suffixes: Vec<&str>) {
+    for num_prefix_quads in 0..256 {
+        for &suffix in suffixes.iter() {
+            let mut encoded = "AAAA".repeat(num_prefix_quads);
+            encoded.push_str(suffix);
+
+            let res = &engine.decode(&encoded);
+            assert!(res.is_ok());
+        }
+    }
+}

+ 120 - 186
zeroidc/vendor/base64/src/lib.rs

@@ -1,61 +1,123 @@
-//! # Configs
+//! # Getting started
 //!
-//! There isn't just one type of Base64; that would be too simple. You need to choose a character
-//! set (standard, URL-safe, etc) and padding suffix (yes/no).
-//! The `Config` struct encapsulates this info. There are some common configs included: `STANDARD`,
-//! `URL_SAFE`, etc. You can also make your own `Config` if needed.
+//! 1. Perhaps one of the preconfigured engines in [engine::general_purpose] will suit, e.g.
+//! [engine::general_purpose::STANDARD_NO_PAD].
+//!     - These are re-exported in [prelude] with a `BASE64_` prefix for those who prefer to
+//!       `use base64::prelude::*` or equivalent, e.g. [prelude::BASE64_STANDARD_NO_PAD]
+//! 1. If not, choose which alphabet you want. Most usage will want [alphabet::STANDARD] or [alphabet::URL_SAFE].
+//! 1. Choose which [Engine] implementation you want. For the moment there is only one: [engine::GeneralPurpose].
+//! 1. Configure the engine appropriately using the engine's `Config` type.
+//!     - This is where you'll select whether to add padding (when encoding) or expect it (when
+//!     decoding). If given the choice, prefer no padding.
+//! 1. Build the engine using the selected alphabet and config.
 //!
-//! The functions that don't have `config` in the name (e.g. `encode()` and `decode()`) use the
-//! `STANDARD` config .
+//! For more detail, see below.
 //!
-//! The functions that write to a slice (the ones that end in `_slice`) are generally the fastest
-//! because they don't need to resize anything. If it fits in your workflow and you care about
-//! performance, keep using the same buffer (growing as need be) and use the `_slice` methods for
-//! the best performance.
+//! ## Alphabets
+//!
+//! An [alphabet::Alphabet] defines what ASCII symbols are used to encode to or decode from.
+//!
+//! Constants in [alphabet] like [alphabet::STANDARD] or [alphabet::URL_SAFE] provide commonly used
+//! alphabets, but you can also build your own custom [alphabet::Alphabet] if needed.
+//!
+//! ## Engines
+//!
+//! Once you have an `Alphabet`, you can pick which `Engine` you want. A few parts of the public
+//! API provide a default, but otherwise the user must provide an `Engine` to use.
+//!
+//! See [Engine] for more.
+//!
+//! ## Config
+//!
+//! In addition to an `Alphabet`, constructing an `Engine` also requires an [engine::Config]. Each
+//! `Engine` has a corresponding `Config` implementation since different `Engine`s may offer different
+//! levels of configurability.
 //!
 //! # Encoding
 //!
-//! Several different encoding functions are available to you depending on your desire for
+//! Several different encoding methods on [Engine] are available to you depending on your desire for
 //! convenience vs performance.
 //!
-//! | Function                | Output                       | Allocates                      |
-//! | ----------------------- | ---------------------------- | ------------------------------ |
-//! | `encode`                | Returns a new `String`       | Always                         |
-//! | `encode_config`         | Returns a new `String`       | Always                         |
-//! | `encode_config_buf`     | Appends to provided `String` | Only if `String` needs to grow |
-//! | `encode_config_slice`   | Writes to provided `&[u8]`   | Never                          |
+//! | Method                   | Output                       | Allocates                      |
+//! | ------------------------ | ---------------------------- | ------------------------------ |
+//! | [Engine::encode]         | Returns a new `String`       | Always                         |
+//! | [Engine::encode_string]  | Appends to provided `String` | Only if `String` needs to grow |
+//! | [Engine::encode_slice]   | Writes to provided `&[u8]`   | Never - fastest                |
 //!
-//! All of the encoding functions that take a `Config` will pad as per the config.
+//! All of the encoding methods will pad as per the engine's config.
 //!
 //! # Decoding
 //!
-//! Just as for encoding, there are different decoding functions available.
+//! Just as for encoding, there are different decoding methods available.
 //!
-//! | Function                | Output                        | Allocates                      |
-//! | ----------------------- | ----------------------------- | ------------------------------ |
-//! | `decode`                | Returns a new `Vec<u8>`       | Always                         |
-//! | `decode_config`         | Returns a new `Vec<u8>`       | Always                         |
-//! | `decode_config_buf`     | Appends to provided `Vec<u8>` | Only if `Vec` needs to grow    |
-//! | `decode_config_slice`   | Writes to provided `&[u8]`    | Never                          |
+//! | Method                   | Output                        | Allocates                      |
+//! | ------------------------ | ----------------------------- | ------------------------------ |
+//! | [Engine::decode]         | Returns a new `Vec<u8>`       | Always                         |
+//! | [Engine::decode_vec]     | Appends to provided `Vec<u8>` | Only if `Vec` needs to grow    |
+//! | [Engine::decode_slice]   | Writes to provided `&[u8]`    | Never - fastest                |
 //!
-//! Unlike encoding, where all possible input is valid, decoding can fail (see `DecodeError`).
+//! Unlike encoding, where all possible input is valid, decoding can fail (see [DecodeError]).
 //!
-//! Input can be invalid because it has invalid characters or invalid padding. (No padding at all is
-//! valid, but excess padding is not.) Whitespace in the input is invalid.
+//! Input can be invalid because it has invalid characters or invalid padding. The nature of how
+//! padding is checked depends on the engine's config.
+//! Whitespace in the input is invalid, just like any other non-base64 byte.
 //!
 //! # `Read` and `Write`
 //!
-//! To map a `Read` of b64 bytes to the decoded bytes, wrap a reader (file, network socket, etc)
-//! with `base64::read::DecoderReader`. To write raw bytes and have them b64 encoded on the fly,
-//! wrap a writer with `base64::write::EncoderWriter`. There is some performance overhead (15% or
-//! so) because of the necessary buffer shuffling -- still fast enough that almost nobody cares.
-//! Also, these implementations do not heap allocate.
+//! To decode a [std::io::Read] of b64 bytes, wrap a reader (file, network socket, etc) with
+//! [read::DecoderReader].
+//!
+//! To write raw bytes and have them b64 encoded on the fly, wrap a [std::io::Write] with
+//! [write::EncoderWriter].
+//!
+//! There is some performance overhead (15% or so) because of the necessary buffer shuffling --
+//! still fast enough that almost nobody cares. Also, these implementations do not heap allocate.
+//!
+//! # `Display`
+//!
+//! See [display] for how to transparently base64 data via a `Display` implementation.
+//!
+//! # Examples
+//!
+//! ## Using predefined engines
+//!
+//! ```
+//! use base64::{Engine as _, engine::general_purpose};
+//!
+//! let orig = b"data";
+//! let encoded: String = general_purpose::STANDARD_NO_PAD.encode(orig);
+//! assert_eq!("ZGF0YQ", encoded);
+//! assert_eq!(orig.as_slice(), &general_purpose::STANDARD_NO_PAD.decode(encoded).unwrap());
+//!
+//! // or, URL-safe
+//! let encoded_url = general_purpose::URL_SAFE_NO_PAD.encode(orig);
+//! ```
+//!
+//! ## Custom alphabet, config, and engine
+//!
+//! ```
+//! use base64::{engine, alphabet, Engine as _};
+//!
+//! // bizarro-world base64: +/ as the first symbols instead of the last
+//! let alphabet =
+//!     alphabet::Alphabet::new("+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
+//!     .unwrap();
+//!
+//! // a very weird config that encodes with padding but requires no padding when decoding...?
+//! let crazy_config = engine::GeneralPurposeConfig::new()
+//!     .with_decode_allow_trailing_bits(true)
+//!     .with_encode_padding(true)
+//!     .with_decode_padding_mode(engine::DecodePaddingMode::RequireNone);
+//!
+//! let crazy_engine = engine::GeneralPurpose::new(&alphabet, crazy_config);
+//!
+//! let encoded = crazy_engine.encode(b"abc 123");
+//!
+//! ```
 //!
 //! # Panics
 //!
 //! If length calculations result in overflowing `usize`, a panic will result.
-//!
-//! The `_slice` flavors of encode or decode will panic if the provided output slice is too small,
 
 #![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
 #![deny(
@@ -69,6 +131,9 @@
     warnings
 )]
 #![forbid(unsafe_code)]
+// Allow globally until https://github.com/rust-lang/rust-clippy/issues/8768 is resolved.
+// The desired state is to allow it only for the rstest_reuse import.
+#![allow(clippy::single_component_path_imports)]
 #![cfg_attr(not(any(feature = "std", test)), no_std)]
 
 #[cfg(all(feature = "alloc", not(any(feature = "std", test))))]
@@ -76,170 +141,39 @@ extern crate alloc;
 #[cfg(any(feature = "std", test))]
 extern crate std as alloc;
 
+// has to be included at top level because of the way rstest_reuse defines its macros
+#[cfg(test)]
+use rstest_reuse;
+
 mod chunked_encoder;
 pub mod display;
 #[cfg(any(feature = "std", test))]
 pub mod read;
-mod tables;
 #[cfg(any(feature = "std", test))]
 pub mod write;
 
+pub mod engine;
+pub use engine::Engine;
+
+pub mod alphabet;
+
 mod encode;
-pub use crate::encode::encode_config_slice;
+#[allow(deprecated)]
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub use crate::encode::{encode, encode_config, encode_config_buf};
+pub use crate::encode::{encode, encode_engine, encode_engine_string};
+#[allow(deprecated)]
+pub use crate::encode::{encode_engine_slice, encoded_len, EncodeSliceError};
 
 mod decode;
+#[allow(deprecated)]
 #[cfg(any(feature = "alloc", feature = "std", test))]
-pub use crate::decode::{decode, decode_config, decode_config_buf};
-pub use crate::decode::{decode_config_slice, DecodeError};
+pub use crate::decode::{decode, decode_engine, decode_engine_vec};
+#[allow(deprecated)]
+pub use crate::decode::{decode_engine_slice, decoded_len_estimate, DecodeError, DecodeSliceError};
+
+pub mod prelude;
 
 #[cfg(test)]
 mod tests;
 
-/// Available encoding character sets
-#[derive(Clone, Copy, Debug)]
-pub enum CharacterSet {
-    /// The standard character set (uses `+` and `/`).
-    ///
-    /// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-3).
-    Standard,
-    /// The URL safe character set (uses `-` and `_`).
-    ///
-    /// See [RFC 3548](https://tools.ietf.org/html/rfc3548#section-4).
-    UrlSafe,
-    /// The `crypt(3)` character set (uses `./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`).
-    ///
-    /// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses.
-    Crypt,
-    /// The bcrypt character set (uses `./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789`).
-    Bcrypt,
-    /// The character set used in IMAP-modified UTF-7 (uses `+` and `,`).
-    ///
-    /// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3)
-    ImapMutf7,
-    /// The character set used in BinHex 4.0 files.
-    ///
-    /// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
-    BinHex,
-}
-
-impl CharacterSet {
-    fn encode_table(self) -> &'static [u8; 64] {
-        match self {
-            CharacterSet::Standard => tables::STANDARD_ENCODE,
-            CharacterSet::UrlSafe => tables::URL_SAFE_ENCODE,
-            CharacterSet::Crypt => tables::CRYPT_ENCODE,
-            CharacterSet::Bcrypt => tables::BCRYPT_ENCODE,
-            CharacterSet::ImapMutf7 => tables::IMAP_MUTF7_ENCODE,
-            CharacterSet::BinHex => tables::BINHEX_ENCODE,
-        }
-    }
-
-    fn decode_table(self) -> &'static [u8; 256] {
-        match self {
-            CharacterSet::Standard => tables::STANDARD_DECODE,
-            CharacterSet::UrlSafe => tables::URL_SAFE_DECODE,
-            CharacterSet::Crypt => tables::CRYPT_DECODE,
-            CharacterSet::Bcrypt => tables::BCRYPT_DECODE,
-            CharacterSet::ImapMutf7 => tables::IMAP_MUTF7_DECODE,
-            CharacterSet::BinHex => tables::BINHEX_DECODE,
-        }
-    }
-}
-
-/// Contains configuration parameters for base64 encoding
-#[derive(Clone, Copy, Debug)]
-pub struct Config {
-    /// Character set to use
-    char_set: CharacterSet,
-    /// True to pad output with `=` characters
-    pad: bool,
-    /// True to ignore excess nonzero bits in the last few symbols, otherwise an error is returned.
-    decode_allow_trailing_bits: bool,
-}
-
-impl Config {
-    /// Create a new `Config`.
-    pub const fn new(char_set: CharacterSet, pad: bool) -> Config {
-        Config {
-            char_set,
-            pad,
-            decode_allow_trailing_bits: false,
-        }
-    }
-
-    /// Sets whether to pad output with `=` characters.
-    pub const fn pad(self, pad: bool) -> Config {
-        Config { pad, ..self }
-    }
-
-    /// Sets whether to emit errors for nonzero trailing bits.
-    ///
-    /// This is useful when implementing
-    /// [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
-    pub const fn decode_allow_trailing_bits(self, allow: bool) -> Config {
-        Config {
-            decode_allow_trailing_bits: allow,
-            ..self
-        }
-    }
-}
-
-/// Standard character set with padding.
-pub const STANDARD: Config = Config {
-    char_set: CharacterSet::Standard,
-    pad: true,
-    decode_allow_trailing_bits: false,
-};
-
-/// Standard character set without padding.
-pub const STANDARD_NO_PAD: Config = Config {
-    char_set: CharacterSet::Standard,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
-/// URL-safe character set with padding
-pub const URL_SAFE: Config = Config {
-    char_set: CharacterSet::UrlSafe,
-    pad: true,
-    decode_allow_trailing_bits: false,
-};
-
-/// URL-safe character set without padding
-pub const URL_SAFE_NO_PAD: Config = Config {
-    char_set: CharacterSet::UrlSafe,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
-/// As per `crypt(3)` requirements
-pub const CRYPT: Config = Config {
-    char_set: CharacterSet::Crypt,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
-/// Bcrypt character set
-pub const BCRYPT: Config = Config {
-    char_set: CharacterSet::Bcrypt,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
-/// IMAP modified UTF-7 requirements
-pub const IMAP_MUTF7: Config = Config {
-    char_set: CharacterSet::ImapMutf7,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
-/// BinHex character set
-pub const BINHEX: Config = Config {
-    char_set: CharacterSet::BinHex,
-    pad: false,
-    decode_allow_trailing_bits: false,
-};
-
 const PAD_BYTE: u8 = b'=';

+ 19 - 0
zeroidc/vendor/base64/src/prelude.rs

@@ -0,0 +1,19 @@
+//! Preconfigured engines for common use cases.
+//!
+//! These are re-exports of `const` engines in [crate::engine::general_purpose], renamed with a `BASE64_`
+//! prefix for those who prefer to `use` the entire path to a name.
+//!
+//! # Examples
+//!
+//! ```
+//! use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD};
+//!
+//! assert_eq!("c29tZSBieXRlcw", &BASE64_STANDARD_NO_PAD.encode(b"some bytes"));
+//! ```
+
+pub use crate::engine::Engine;
+
+pub use crate::engine::general_purpose::STANDARD as BASE64_STANDARD;
+pub use crate::engine::general_purpose::STANDARD_NO_PAD as BASE64_STANDARD_NO_PAD;
+pub use crate::engine::general_purpose::URL_SAFE as BASE64_URL_SAFE;
+pub use crate::engine::general_purpose::URL_SAFE_NO_PAD as BASE64_URL_SAFE_NO_PAD;

+ 47 - 34
zeroidc/vendor/base64/src/read/decoder.rs

@@ -1,5 +1,4 @@
-use crate::{decode_config_slice, Config, DecodeError};
-use std::io::Read;
+use crate::{engine::Engine, DecodeError};
 use std::{cmp, fmt, io};
 
 // This should be large, but it has to fit on the stack.
@@ -16,11 +15,13 @@ const DECODED_CHUNK_SIZE: usize = 3;
 /// ```
 /// use std::io::Read;
 /// use std::io::Cursor;
+/// use base64::engine::general_purpose;
 ///
 /// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc.
 /// let mut wrapped_reader = Cursor::new(b"YXNkZg==");
 /// let mut decoder = base64::read::DecoderReader::new(
-///     &mut wrapped_reader, base64::STANDARD);
+///     &mut wrapped_reader,
+///     &general_purpose::STANDARD);
 ///
 /// // handle errors as you normally would
 /// let mut result = Vec::new();
@@ -29,10 +30,10 @@ const DECODED_CHUNK_SIZE: usize = 3;
 /// assert_eq!(b"asdf", &result[..]);
 ///
 /// ```
-pub struct DecoderReader<'a, R: 'a + io::Read> {
-    config: Config,
+pub struct DecoderReader<'e, E: Engine, R: io::Read> {
+    engine: &'e E,
     /// Where b64 data is read from
-    r: &'a mut R,
+    inner: R,
 
     // Holds b64 data read from the delegate reader.
     b64_buffer: [u8; BUF_SIZE],
@@ -54,10 +55,9 @@ pub struct DecoderReader<'a, R: 'a + io::Read> {
     total_b64_decoded: usize,
 }
 
-impl<'a, R: io::Read> fmt::Debug for DecoderReader<'a, R> {
+impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         f.debug_struct("DecoderReader")
-            .field("config", &self.config)
             .field("b64_offset", &self.b64_offset)
             .field("b64_len", &self.b64_len)
             .field("decoded_buffer", &self.decoded_buffer)
@@ -68,12 +68,12 @@ impl<'a, R: io::Read> fmt::Debug for DecoderReader<'a, R> {
     }
 }
 
-impl<'a, R: io::Read> DecoderReader<'a, R> {
+impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
     /// Create a new decoder that will read from the provided reader `r`.
-    pub fn new(r: &'a mut R, config: Config) -> Self {
+    pub fn new(reader: R, engine: &'e E) -> Self {
         DecoderReader {
-            config,
-            r,
+            engine,
+            inner: reader,
             b64_buffer: [0; BUF_SIZE],
             b64_offset: 0,
             b64_len: 0,
@@ -89,7 +89,7 @@ impl<'a, R: io::Read> DecoderReader<'a, R> {
     /// Returns a Result with the number of (decoded) bytes copied.
     fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         debug_assert!(self.decoded_len > 0);
-        debug_assert!(buf.len() > 0);
+        debug_assert!(!buf.is_empty());
 
         let copy_len = cmp::min(self.decoded_len, buf.len());
         debug_assert!(copy_len > 0);
@@ -114,13 +114,13 @@ impl<'a, R: io::Read> DecoderReader<'a, R> {
         debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE);
 
         let read = self
-            .r
+            .inner
             .read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?;
         self.b64_len += read;
 
         debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
 
-        return Ok(read);
+        Ok(read)
     }
 
     /// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the
@@ -130,23 +130,26 @@ impl<'a, R: io::Read> DecoderReader<'a, R> {
     fn decode_to_buf(&mut self, num_bytes: usize, buf: &mut [u8]) -> io::Result<usize> {
         debug_assert!(self.b64_len >= num_bytes);
         debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
-        debug_assert!(buf.len() > 0);
-
-        let decoded = decode_config_slice(
-            &self.b64_buffer[self.b64_offset..self.b64_offset + num_bytes],
-            self.config,
-            &mut buf[..],
-        )
-        .map_err(|e| match e {
-            DecodeError::InvalidByte(offset, byte) => {
-                DecodeError::InvalidByte(self.total_b64_decoded + offset, byte)
-            }
-            DecodeError::InvalidLength => DecodeError::InvalidLength,
-            DecodeError::InvalidLastSymbol(offset, byte) => {
-                DecodeError::InvalidLastSymbol(self.total_b64_decoded + offset, byte)
-            }
-        })
-        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+        debug_assert!(!buf.is_empty());
+
+        let decoded = self
+            .engine
+            .internal_decode(
+                &self.b64_buffer[self.b64_offset..self.b64_offset + num_bytes],
+                buf,
+                self.engine.internal_decoded_len_estimate(num_bytes),
+            )
+            .map_err(|e| match e {
+                DecodeError::InvalidByte(offset, byte) => {
+                    DecodeError::InvalidByte(self.total_b64_decoded + offset, byte)
+                }
+                DecodeError::InvalidLength => DecodeError::InvalidLength,
+                DecodeError::InvalidLastSymbol(offset, byte) => {
+                    DecodeError::InvalidLastSymbol(self.total_b64_decoded + offset, byte)
+                }
+                DecodeError::InvalidPadding => DecodeError::InvalidPadding,
+            })
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
 
         self.total_b64_decoded += num_bytes;
         self.b64_offset += num_bytes;
@@ -156,9 +159,19 @@ impl<'a, R: io::Read> DecoderReader<'a, R> {
 
         Ok(decoded)
     }
+
+    /// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded
+    /// input from.
+    ///
+    /// Because `DecoderReader` performs internal buffering, the state of the inner reader is
+    /// unspecified. This function is mainly provided because the inner reader type may provide
+    /// additional functionality beyond the `Read` implementation which may still be useful.
+    pub fn into_inner(self) -> R {
+        self.inner
+    }
 }
 
-impl<'a, R: Read> Read for DecoderReader<'a, R> {
+impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
     /// Decode input from the wrapped reader.
     ///
     /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
@@ -172,7 +185,7 @@ impl<'a, R: Read> Read for DecoderReader<'a, R> {
     /// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid
     /// base64 are also possible, and will have `io::ErrorKind::InvalidData`.
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        if buf.len() == 0 {
+        if buf.is_empty() {
             return Ok(0);
         }
 

+ 52 - 41
zeroidc/vendor/base64/src/read/decoder_tests.rs

@@ -1,12 +1,17 @@
-use std::io::{self, Read};
+use std::{
+    cmp,
+    io::{self, Read as _},
+    iter,
+};
 
-use rand::{Rng, RngCore};
-use std::{cmp, iter};
+use rand::{Rng as _, RngCore as _};
 
 use super::decoder::{DecoderReader, BUF_SIZE};
-use crate::encode::encode_config_buf;
-use crate::tests::random_config;
-use crate::{decode_config_buf, DecodeError, STANDARD};
+use crate::{
+    engine::{general_purpose::STANDARD, Engine, GeneralPurpose},
+    tests::{random_alphabet, random_config, random_engine},
+    DecodeError,
+};
 
 #[test]
 fn simple() {
@@ -27,7 +32,7 @@ fn simple() {
         // Read n bytes at a time.
         for n in 1..base64data.len() + 1 {
             let mut wrapped_reader = io::Cursor::new(base64data);
-            let mut decoder = DecoderReader::new(&mut wrapped_reader, STANDARD);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD);
 
             // handle errors as you normally would
             let mut text_got = Vec::new();
@@ -59,7 +64,7 @@ fn trailing_junk() {
         // Read n bytes at a time.
         for n in 1..base64data.len() + 1 {
             let mut wrapped_reader = io::Cursor::new(base64data);
-            let mut decoder = DecoderReader::new(&mut wrapped_reader, STANDARD);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD);
 
             // handle errors as you normally would
             let mut buffer = vec![0u8; n];
@@ -92,14 +97,14 @@ fn handles_short_read_from_delegate() {
         b64.clear();
         decoded.clear();
 
-        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        let size = rng.gen_range(0..(10 * BUF_SIZE));
         bytes.extend(iter::repeat(0).take(size));
         bytes.truncate(size);
         rng.fill_bytes(&mut bytes[..size]);
         assert_eq!(size, bytes.len());
 
-        let config = random_config(&mut rng);
-        encode_config_buf(&bytes[..], config, &mut b64);
+        let engine = random_engine(&mut rng);
+        engine.encode_string(&bytes[..], &mut b64);
 
         let mut wrapped_reader = io::Cursor::new(b64.as_bytes());
         let mut short_reader = RandomShortRead {
@@ -107,7 +112,7 @@ fn handles_short_read_from_delegate() {
             rng: &mut rng,
         };
 
-        let mut decoder = DecoderReader::new(&mut short_reader, config);
+        let mut decoder = DecoderReader::new(&mut short_reader, &engine);
 
         let decoded_len = decoder.read_to_end(&mut decoded).unwrap();
         assert_eq!(size, decoded_len);
@@ -127,7 +132,7 @@ fn read_in_short_increments() {
         b64.clear();
         decoded.clear();
 
-        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        let size = rng.gen_range(0..(10 * BUF_SIZE));
         bytes.extend(iter::repeat(0).take(size));
         // leave room to play around with larger buffers
         decoded.extend(iter::repeat(0).take(size * 3));
@@ -135,12 +140,12 @@ fn read_in_short_increments() {
         rng.fill_bytes(&mut bytes[..]);
         assert_eq!(size, bytes.len());
 
-        let config = random_config(&mut rng);
+        let engine = random_engine(&mut rng);
 
-        encode_config_buf(&bytes[..], config, &mut b64);
+        engine.encode_string(&bytes[..], &mut b64);
 
         let mut wrapped_reader = io::Cursor::new(&b64[..]);
-        let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+        let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
 
         consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut decoder);
     }
@@ -158,7 +163,7 @@ fn read_in_short_increments_with_short_delegate_reads() {
         b64.clear();
         decoded.clear();
 
-        let size = rng.gen_range(0, 10 * BUF_SIZE);
+        let size = rng.gen_range(0..(10 * BUF_SIZE));
         bytes.extend(iter::repeat(0).take(size));
         // leave room to play around with larger buffers
         decoded.extend(iter::repeat(0).take(size * 3));
@@ -166,18 +171,23 @@ fn read_in_short_increments_with_short_delegate_reads() {
         rng.fill_bytes(&mut bytes[..]);
         assert_eq!(size, bytes.len());
 
-        let config = random_config(&mut rng);
+        let engine = random_engine(&mut rng);
 
-        encode_config_buf(&bytes[..], config, &mut b64);
+        engine.encode_string(&bytes[..], &mut b64);
 
         let mut base_reader = io::Cursor::new(&b64[..]);
-        let mut decoder = DecoderReader::new(&mut base_reader, config);
+        let mut decoder = DecoderReader::new(&mut base_reader, &engine);
         let mut short_reader = RandomShortRead {
             delegate: &mut decoder,
             rng: &mut rand::thread_rng(),
         };
 
-        consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut short_reader)
+        consume_with_short_reads_and_validate(
+            &mut rng,
+            &bytes[..],
+            &mut decoded,
+            &mut short_reader,
+        );
     }
 }
 
@@ -195,32 +205,32 @@ fn reports_invalid_last_symbol_correctly() {
         b64.clear();
         b64_bytes.clear();
 
-        let size = rng.gen_range(1, 10 * BUF_SIZE);
+        let size = rng.gen_range(1..(10 * BUF_SIZE));
         bytes.extend(iter::repeat(0).take(size));
         decoded.extend(iter::repeat(0).take(size));
         rng.fill_bytes(&mut bytes[..]);
         assert_eq!(size, bytes.len());
 
-        let mut config = random_config(&mut rng);
+        let config = random_config(&mut rng);
+        let alphabet = random_alphabet(&mut rng);
         // changing padding will cause invalid padding errors when we twiddle the last byte
-        config.pad = false;
-
-        encode_config_buf(&bytes[..], config, &mut b64);
+        let engine = GeneralPurpose::new(alphabet, config.with_encode_padding(false));
+        engine.encode_string(&bytes[..], &mut b64);
         b64_bytes.extend(b64.bytes());
         assert_eq!(b64_bytes.len(), b64.len());
 
         // change the last character to every possible symbol. Should behave the same as bulk
         // decoding whether invalid or valid.
-        for &s1 in config.char_set.encode_table().iter() {
+        for &s1 in alphabet.symbols.iter() {
             decoded.clear();
             bulk_decoded.clear();
 
             // replace the last
             *b64_bytes.last_mut().unwrap() = s1;
-            let bulk_res = decode_config_buf(&b64_bytes[..], config, &mut bulk_decoded);
+            let bulk_res = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded);
 
             let mut wrapped_reader = io::Cursor::new(&b64_bytes[..]);
-            let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+            let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
 
             let stream_res = decoder.read_to_end(&mut decoded).map(|_| ()).map_err(|e| {
                 e.into_inner()
@@ -244,20 +254,21 @@ fn reports_invalid_byte_correctly() {
         b64.clear();
         decoded.clear();
 
-        let size = rng.gen_range(1, 10 * BUF_SIZE);
+        let size = rng.gen_range(1..(10 * BUF_SIZE));
         bytes.extend(iter::repeat(0).take(size));
         rng.fill_bytes(&mut bytes[..size]);
         assert_eq!(size, bytes.len());
 
-        let config = random_config(&mut rng);
-        encode_config_buf(&bytes[..], config, &mut b64);
+        let engine = random_engine(&mut rng);
+
+        engine.encode_string(&bytes[..], &mut b64);
         // replace one byte, somewhere, with '*', which is invalid
-        let bad_byte_pos = rng.gen_range(0, &b64.len());
+        let bad_byte_pos = rng.gen_range(0..b64.len());
         let mut b64_bytes = b64.bytes().collect::<Vec<u8>>();
         b64_bytes[bad_byte_pos] = b'*';
 
         let mut wrapped_reader = io::Cursor::new(b64_bytes.clone());
-        let mut decoder = DecoderReader::new(&mut wrapped_reader, config);
+        let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
 
         // some gymnastics to avoid double-moving the io::Error, which is not Copy
         let read_decode_err = decoder
@@ -273,7 +284,7 @@ fn reports_invalid_byte_correctly() {
             .and_then(|o| o);
 
         let mut bulk_buf = Vec::new();
-        let bulk_decode_err = decode_config_buf(&b64_bytes[..], config, &mut bulk_buf).err();
+        let bulk_decode_err = engine.decode_vec(&b64_bytes[..], &mut bulk_buf).err();
 
         // it's tricky to predict where the invalid data's offset will be since if it's in the last
         // chunk it will be reported at the first padding location because it's treated as invalid
@@ -285,12 +296,12 @@ fn reports_invalid_byte_correctly() {
     }
 }
 
-fn consume_with_short_reads_and_validate<R: Read>(
+fn consume_with_short_reads_and_validate<R: io::Read>(
     rng: &mut rand::rngs::ThreadRng,
     expected_bytes: &[u8],
-    decoded: &mut Vec<u8>,
+    decoded: &mut [u8],
     short_reader: &mut R,
-) -> () {
+) {
     let mut total_read = 0_usize;
     loop {
         assert!(
@@ -302,13 +313,13 @@ fn consume_with_short_reads_and_validate<R: Read>(
         if total_read == expected_bytes.len() {
             assert_eq!(expected_bytes, &decoded[..total_read]);
             // should be done
-            assert_eq!(0, short_reader.read(&mut decoded[..]).unwrap());
+            assert_eq!(0, short_reader.read(&mut *decoded).unwrap());
             // didn't write anything
             assert_eq!(expected_bytes, &decoded[..total_read]);
 
             break;
         }
-        let decode_len = rng.gen_range(1, cmp::max(2, expected_bytes.len() * 2));
+        let decode_len = rng.gen_range(1..cmp::max(2, expected_bytes.len() * 2));
 
         let read = short_reader
             .read(&mut decoded[total_read..total_read + decode_len])
@@ -328,7 +339,7 @@ struct RandomShortRead<'a, 'b, R: io::Read, N: rand::Rng> {
 impl<'a, 'b, R: io::Read, N: rand::Rng> io::Read for RandomShortRead<'a, 'b, R, N> {
     fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
         // avoid 0 since it means EOF for non-empty buffers
-        let effective_len = cmp::min(self.rng.gen_range(1, 20), buf.len());
+        let effective_len = cmp::min(self.rng.gen_range(1..20), buf.len());
 
         self.delegate.read(&mut buf[..effective_len])
     }

+ 58 - 22
zeroidc/vendor/base64/src/tests.rs

@@ -1,11 +1,19 @@
-use crate::{decode_config, encode::encoded_size, encode_config_buf, CharacterSet, Config};
-
 use std::str;
 
 use rand::{
-    distributions::{Distribution, Uniform},
+    distributions,
+    distributions::{Distribution as _, Uniform},
     seq::SliceRandom,
-    FromEntropy, Rng,
+    Rng, SeedableRng,
+};
+
+use crate::{
+    alphabet,
+    encode::encoded_len,
+    engine::{
+        general_purpose::{GeneralPurpose, GeneralPurposeConfig},
+        Config, DecodePaddingMode, Engine,
+    },
 };
 
 #[test]
@@ -19,10 +27,10 @@ fn roundtrip_random_config_long() {
     roundtrip_random_config(Uniform::new(0, 1000), 10_000);
 }
 
-pub fn assert_encode_sanity(encoded: &str, config: Config, input_len: usize) {
+pub fn assert_encode_sanity(encoded: &str, padded: bool, input_len: usize) {
     let input_rem = input_len % 3;
     let expected_padding_len = if input_rem > 0 {
-        if config.pad {
+        if padded {
             3 - input_rem
         } else {
             0
@@ -31,7 +39,7 @@ pub fn assert_encode_sanity(encoded: &str, config: Config, input_len: usize) {
         0
     };
 
-    let expected_encoded_len = encoded_size(input_len, config).unwrap();
+    let expected_encoded_len = encoded_len(input_len, padded).unwrap();
 
     assert_eq!(expected_encoded_len, encoded.len());
 
@@ -53,29 +61,57 @@ fn roundtrip_random_config(input_len_range: Uniform<usize>, iterations: u32) {
 
         let input_len = input_len_range.sample(&mut rng);
 
-        let config = random_config(&mut rng);
+        let engine = random_engine(&mut rng);
 
         for _ in 0..input_len {
             input_buf.push(rng.gen());
         }
 
-        encode_config_buf(&input_buf, config, &mut encoded_buf);
+        engine.encode_string(&input_buf, &mut encoded_buf);
+
+        assert_encode_sanity(&encoded_buf, engine.config().encode_padding(), input_len);
+
+        assert_eq!(input_buf, engine.decode(&encoded_buf).unwrap());
+    }
+}
 
-        assert_encode_sanity(&encoded_buf, config, input_len);
+pub fn random_config<R: Rng>(rng: &mut R) -> GeneralPurposeConfig {
+    let mode = rng.gen();
+    GeneralPurposeConfig::new()
+        .with_encode_padding(match mode {
+            DecodePaddingMode::Indifferent => rng.gen(),
+            DecodePaddingMode::RequireCanonical => true,
+            DecodePaddingMode::RequireNone => false,
+        })
+        .with_decode_padding_mode(mode)
+        .with_decode_allow_trailing_bits(rng.gen())
+}
 
-        assert_eq!(input_buf, decode_config(&encoded_buf, config).unwrap());
+impl distributions::Distribution<DecodePaddingMode> for distributions::Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DecodePaddingMode {
+        match rng.gen_range(0..=2) {
+            0 => DecodePaddingMode::Indifferent,
+            1 => DecodePaddingMode::RequireCanonical,
+            _ => DecodePaddingMode::RequireNone,
+        }
     }
 }
 
-pub fn random_config<R: Rng>(rng: &mut R) -> Config {
-    const CHARSETS: &[CharacterSet] = &[
-        CharacterSet::UrlSafe,
-        CharacterSet::Standard,
-        CharacterSet::Crypt,
-        CharacterSet::ImapMutf7,
-        CharacterSet::BinHex,
-    ];
-    let charset = *CHARSETS.choose(rng).unwrap();
-
-    Config::new(charset, rng.gen())
+pub fn random_alphabet<R: Rng>(rng: &mut R) -> &'static alphabet::Alphabet {
+    ALPHABETS.choose(rng).unwrap()
 }
+
+pub fn random_engine<R: Rng>(rng: &mut R) -> GeneralPurpose {
+    let alphabet = random_alphabet(rng);
+    let config = random_config(rng);
+    GeneralPurpose::new(alphabet, config)
+}
+
+const ALPHABETS: &[alphabet::Alphabet] = &[
+    alphabet::URL_SAFE,
+    alphabet::STANDARD,
+    alphabet::CRYPT,
+    alphabet::BCRYPT,
+    alphabet::IMAP_MUTF7,
+    alphabet::BIN_HEX,
+];

+ 53 - 27
zeroidc/vendor/base64/src/write/encoder.rs

@@ -1,8 +1,7 @@
-use crate::encode::encode_to_slice;
-use crate::{encode_config_slice, Config};
+use crate::engine::Engine;
 use std::{
-    cmp, fmt,
-    io::{ErrorKind, Result, Write},
+    cmp, fmt, io,
+    io::{ErrorKind, Result},
 };
 
 pub(crate) const BUF_SIZE: usize = 1024;
@@ -23,9 +22,10 @@ const MIN_ENCODE_CHUNK_SIZE: usize = 3;
 ///
 /// ```
 /// use std::io::Write;
+/// use base64::engine::general_purpose;
 ///
 /// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc.
-/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), base64::STANDARD);
+/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), &general_purpose::STANDARD);
 ///
 /// // handle errors as you normally would
 /// enc.write_all(b"asdf").unwrap();
@@ -53,8 +53,15 @@ const MIN_ENCODE_CHUNK_SIZE: usize = 3;
 ///
 /// It has some minor performance loss compared to encoding slices (a couple percent).
 /// It does not do any heap allocation.
-pub struct EncoderWriter<W: Write> {
-    config: Config,
+///
+/// # Limitations
+///
+/// Owing to the specification of the `write` and `flush` methods on the `Write` trait and their
+/// implications for a buffering implementation, these methods may not behave as expected. In
+/// particular, calling `write_all` on this interface may fail with `io::ErrorKind::WriteZero`.
+/// See the documentation of the `Write` trait implementation for further details.
+pub struct EncoderWriter<'e, E: Engine, W: io::Write> {
+    engine: &'e E,
     /// Where encoded data is written to. It's an Option as it's None immediately before Drop is
     /// called so that finish() can return the underlying writer. None implies that finish() has
     /// been called successfully.
@@ -73,7 +80,7 @@ pub struct EncoderWriter<W: Write> {
     panicked: bool,
 }
 
-impl<W: Write> fmt::Debug for EncoderWriter<W> {
+impl<'e, E: Engine, W: io::Write> fmt::Debug for EncoderWriter<'e, E, W> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(
             f,
@@ -86,12 +93,12 @@ impl<W: Write> fmt::Debug for EncoderWriter<W> {
     }
 }
 
-impl<W: Write> EncoderWriter<W> {
-    /// Create a new encoder that will write to the provided delegate writer `w`.
-    pub fn new(w: W, config: Config) -> EncoderWriter<W> {
+impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
+    /// Create a new encoder that will write to the provided delegate writer.
+    pub fn new(delegate: W, engine: &'e E) -> EncoderWriter<'e, E, W> {
         EncoderWriter {
-            config,
-            delegate: Some(w),
+            engine,
+            delegate: Some(delegate),
             extra_input: [0u8; MIN_ENCODE_CHUNK_SIZE],
             extra_input_occupied_len: 0,
             output: [0u8; BUF_SIZE],
@@ -120,7 +127,7 @@ impl<W: Write> EncoderWriter<W> {
         // If we could consume self in finish(), we wouldn't have to worry about this case, but
         // finish() is retryable in the face of I/O errors, so we can't consume here.
         if self.delegate.is_none() {
-            panic!("Encoder has already had finish() called")
+            panic!("Encoder has already had finish() called");
         };
 
         self.write_final_leftovers()?;
@@ -141,11 +148,13 @@ impl<W: Write> EncoderWriter<W> {
         self.write_all_encoded_output()?;
 
         if self.extra_input_occupied_len > 0 {
-            let encoded_len = encode_config_slice(
-                &self.extra_input[..self.extra_input_occupied_len],
-                self.config,
-                &mut self.output[..],
-            );
+            let encoded_len = self
+                .engine
+                .encode_slice(
+                    &self.extra_input[..self.extra_input_occupied_len],
+                    &mut self.output[..],
+                )
+                .expect("buffer is large enough");
 
             self.output_occupied_len = encoded_len;
 
@@ -182,7 +191,7 @@ impl<W: Write> EncoderWriter<W> {
                 self.output_occupied_len = current_output_len.checked_sub(consumed).unwrap();
                 // If we're blocking on I/O, the minor inefficiency of copying bytes to the
                 // start of the buffer is the least of our concerns...
-                // Rotate moves more than we need to, but copy_within isn't stabilized yet.
+                // TODO Rotate moves more than we need to; copy_within now stable.
                 self.output.rotate_left(consumed);
             } else {
                 self.output_occupied_len = 0;
@@ -215,15 +224,34 @@ impl<W: Write> EncoderWriter<W> {
         debug_assert_eq!(0, self.output_occupied_len);
         Ok(())
     }
+
+    /// Unwraps this `EncoderWriter`, returning the base writer it writes base64 encoded output
+    /// to.
+    ///
+    /// Normally this method should not be needed, since `finish()` returns the inner writer if
+    /// it completes successfully. That will also ensure all data has been flushed, which the
+    /// `into_inner()` function does *not* do.
+    ///
+    /// Calling this method after `finish()` has completed successfully will panic, since the
+    /// writer has already been returned.
+    ///
+    /// This method may be useful if the writer implements additional APIs beyond the `Write`
+    /// trait. Note that the inner writer might be in an error state or have an incomplete
+    /// base64 string written to it.
+    pub fn into_inner(mut self) -> W {
+        self.delegate
+            .take()
+            .expect("Encoder has already had finish() called")
+    }
 }
 
-impl<W: Write> Write for EncoderWriter<W> {
+impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
     /// Encode input and then write to the delegate writer.
     ///
     /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
     /// of `input` consumed. The value may be `0`, which interacts poorly with `write_all`, which
     /// interprets `Ok(0)` as an error, despite it being allowed by the contract of `write`. See
-    /// https://github.com/rust-lang/rust/issues/56889 for more on that.
+    /// <https://github.com/rust-lang/rust/issues/56889> for more on that.
     ///
     /// If the previous call to `write` provided more (encoded) data than the delegate writer could
     /// accept in a single call to its `write`, the remaining data is buffered. As long as buffered
@@ -286,10 +314,9 @@ impl<W: Write> Write for EncoderWriter<W> {
                 self.extra_input[self.extra_input_occupied_len..MIN_ENCODE_CHUNK_SIZE]
                     .copy_from_slice(&input[0..extra_input_read_len]);
 
-                let len = encode_to_slice(
+                let len = self.engine.internal_encode(
                     &self.extra_input[0..MIN_ENCODE_CHUNK_SIZE],
                     &mut self.output[..],
-                    self.config.char_set.encode_table(),
                 );
                 debug_assert_eq!(4, len);
 
@@ -335,10 +362,9 @@ impl<W: Write> Write for EncoderWriter<W> {
         debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE);
         debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE);
 
-        encoded_size += encode_to_slice(
+        encoded_size += self.engine.internal_encode(
             &input[..(input_chunks_to_encode_len)],
             &mut self.output[encoded_size..],
-            self.config.char_set.encode_table(),
         );
 
         // not updating `self.output_occupied_len` here because if the below write fails, it should
@@ -371,7 +397,7 @@ impl<W: Write> Write for EncoderWriter<W> {
     }
 }
 
-impl<W: Write> Drop for EncoderWriter<W> {
+impl<'e, E: Engine, W: io::Write> Drop for EncoderWriter<'e, E, W> {
     fn drop(&mut self) {
         if !self.panicked {
             // like `BufWriter`, ignore errors during drop

+ 29 - 27
zeroidc/vendor/base64/src/write/encoder_string_writer.rs

@@ -1,10 +1,10 @@
 use super::encoder::EncoderWriter;
-use crate::Config;
+use crate::engine::Engine;
 use std::io;
-use std::io::Write;
 
 /// A `Write` implementation that base64-encodes data using the provided config and accumulates the
-/// resulting base64 in memory, which is then exposed as a String via `into_inner()`.
+/// resulting base64 utf8 `&str` in a [StrConsumer] implementation (typically `String`), which is
+/// then exposed via `into_inner()`.
 ///
 /// # Examples
 ///
@@ -12,8 +12,9 @@ use std::io::Write;
 ///
 /// ```
 /// use std::io::Write;
+/// use base64::engine::general_purpose;
 ///
-/// let mut enc = base64::write::EncoderStringWriter::new(base64::STANDARD);
+/// let mut enc = base64::write::EncoderStringWriter::new(&general_purpose::STANDARD);
 ///
 /// enc.write_all(b"asdf").unwrap();
 ///
@@ -23,14 +24,17 @@ use std::io::Write;
 /// assert_eq!("YXNkZg==", &b64_string);
 /// ```
 ///
-/// Or, append to an existing String:
+/// Or, append to an existing `String`, which implements `StrConsumer`:
 ///
 /// ```
 /// use std::io::Write;
+/// use base64::engine::general_purpose;
 ///
 /// let mut buf = String::from("base64: ");
 ///
-/// let mut enc = base64::write::EncoderStringWriter::from(&mut buf, base64::STANDARD);
+/// let mut enc = base64::write::EncoderStringWriter::from_consumer(
+///     &mut buf,
+///     &general_purpose::STANDARD);
 ///
 /// enc.write_all(b"asdf").unwrap();
 ///
@@ -49,40 +53,38 @@ use std::io::Write;
 ///
 /// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain
 /// bytes to a `io::Write`.
-pub struct EncoderStringWriter<S: StrConsumer> {
-    encoder: EncoderWriter<Utf8SingleCodeUnitWriter<S>>,
+pub struct EncoderStringWriter<'e, E: Engine, S: StrConsumer> {
+    encoder: EncoderWriter<'e, E, Utf8SingleCodeUnitWriter<S>>,
 }
 
-impl<S: StrConsumer> EncoderStringWriter<S> {
+impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> {
     /// Create a EncoderStringWriter that will append to the provided `StrConsumer`.
-    pub fn from(str_consumer: S, config: Config) -> Self {
+    pub fn from_consumer(str_consumer: S, engine: &'e E) -> Self {
         EncoderStringWriter {
-            encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, config),
+            encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, engine),
         }
     }
 
     /// Encode all remaining buffered data, including any trailing incomplete input triples and
     /// associated padding.
     ///
-    /// Once this succeeds, no further writes or calls to this method are allowed.
-    ///
     /// Returns the base64-encoded form of the accumulated written data.
     pub fn into_inner(mut self) -> S {
         self.encoder
             .finish()
-            .expect("Writing to a Vec<u8> should never fail")
+            .expect("Writing to a consumer should never fail")
             .str_consumer
     }
 }
 
-impl EncoderStringWriter<String> {
-    /// Create a EncoderStringWriter that will encode into a new String with the provided config.
-    pub fn new(config: Config) -> Self {
-        EncoderStringWriter::from(String::new(), config)
+impl<'e, E: Engine> EncoderStringWriter<'e, E, String> {
+    /// Create a EncoderStringWriter that will encode into a new `String` with the provided config.
+    pub fn new(engine: &'e E) -> Self {
+        EncoderStringWriter::from_consumer(String::new(), engine)
     }
 }
 
-impl<S: StrConsumer> Write for EncoderStringWriter<S> {
+impl<'e, E: Engine, S: StrConsumer> io::Write for EncoderStringWriter<'e, E, S> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
         self.encoder.write(buf)
     }
@@ -101,14 +103,14 @@ pub trait StrConsumer {
 /// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`.
 impl<S: StrConsumer + ?Sized> StrConsumer for &mut S {
     fn consume(&mut self, buf: &str) {
-        (**self).consume(buf)
+        (**self).consume(buf);
     }
 }
 
 /// Pushes the str onto the end of the String
 impl StrConsumer for String {
     fn consume(&mut self, buf: &str) {
-        self.push_str(buf)
+        self.push_str(buf);
     }
 }
 
@@ -138,9 +140,9 @@ impl<S: StrConsumer> io::Write for Utf8SingleCodeUnitWriter<S> {
 
 #[cfg(test)]
 mod tests {
-    use crate::encode_config_buf;
-    use crate::tests::random_config;
-    use crate::write::encoder_string_writer::EncoderStringWriter;
+    use crate::{
+        engine::Engine, tests::random_engine, write::encoder_string_writer::EncoderStringWriter,
+    };
     use rand::Rng;
     use std::io::Write;
 
@@ -160,10 +162,10 @@ mod tests {
                 orig_data.push(rng.gen());
             }
 
-            let config = random_config(&mut rng);
-            encode_config_buf(&orig_data, config, &mut normal_encoded);
+            let engine = random_engine(&mut rng);
+            engine.encode_string(&orig_data, &mut normal_encoded);
 
-            let mut stream_encoder = EncoderStringWriter::new(config);
+            let mut stream_encoder = EncoderStringWriter::new(&engine);
             // Write the first i bytes, then the rest
             stream_encoder.write_all(&orig_data[0..i]).unwrap();
             stream_encoder.write_all(&orig_data[i..]).unwrap();

+ 72 - 86
zeroidc/vendor/base64/src/write/encoder_tests.rs

@@ -1,29 +1,39 @@
-use super::EncoderWriter;
-use crate::tests::random_config;
-use crate::{encode_config, encode_config_buf, STANDARD_NO_PAD, URL_SAFE};
-
 use std::io::{Cursor, Write};
 use std::{cmp, io, str};
 
 use rand::Rng;
 
+use crate::{
+    alphabet::{STANDARD, URL_SAFE},
+    engine::{
+        general_purpose::{GeneralPurpose, NO_PAD, PAD},
+        Engine,
+    },
+    tests::random_engine,
+};
+
+use super::EncoderWriter;
+
+const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD);
+const NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&STANDARD, NO_PAD);
+
 #[test]
 fn encode_three_bytes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         let sz = enc.write(b"abc").unwrap();
         assert_eq!(sz, 3);
     }
-    assert_eq!(&c.get_ref()[..], encode_config("abc", URL_SAFE).as_bytes());
+    assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes());
 }
 
 #[test]
 fn encode_nine_bytes_two_writes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         let sz = enc.write(b"abcdef").unwrap();
         assert_eq!(sz, 6);
@@ -32,7 +42,7 @@ fn encode_nine_bytes_two_writes() {
     }
     assert_eq!(
         &c.get_ref()[..],
-        encode_config("abcdefghi", URL_SAFE).as_bytes()
+        URL_SAFE_ENGINE.encode("abcdefghi").as_bytes()
     );
 }
 
@@ -40,21 +50,21 @@ fn encode_nine_bytes_two_writes() {
 fn encode_one_then_two_bytes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         let sz = enc.write(b"a").unwrap();
         assert_eq!(sz, 1);
         let sz = enc.write(b"bc").unwrap();
         assert_eq!(sz, 2);
     }
-    assert_eq!(&c.get_ref()[..], encode_config("abc", URL_SAFE).as_bytes());
+    assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes());
 }
 
 #[test]
 fn encode_one_then_five_bytes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         let sz = enc.write(b"a").unwrap();
         assert_eq!(sz, 1);
@@ -63,7 +73,7 @@ fn encode_one_then_five_bytes() {
     }
     assert_eq!(
         &c.get_ref()[..],
-        encode_config("abcdef", URL_SAFE).as_bytes()
+        URL_SAFE_ENGINE.encode("abcdef").as_bytes()
     );
 }
 
@@ -71,7 +81,7 @@ fn encode_one_then_five_bytes() {
 fn encode_1_2_3_bytes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         let sz = enc.write(b"a").unwrap();
         assert_eq!(sz, 1);
@@ -82,7 +92,7 @@ fn encode_1_2_3_bytes() {
     }
     assert_eq!(
         &c.get_ref()[..],
-        encode_config("abcdef", URL_SAFE).as_bytes()
+        URL_SAFE_ENGINE.encode("abcdef").as_bytes()
     );
 }
 
@@ -90,20 +100,20 @@ fn encode_1_2_3_bytes() {
 fn encode_with_padding() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         enc.write_all(b"abcd").unwrap();
 
         enc.flush().unwrap();
     }
-    assert_eq!(&c.get_ref()[..], encode_config("abcd", URL_SAFE).as_bytes());
+    assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abcd").as_bytes());
 }
 
 #[test]
 fn encode_with_padding_multiple_writes() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         assert_eq!(1, enc.write(b"a").unwrap());
         assert_eq!(2, enc.write(b"bc").unwrap());
@@ -114,7 +124,7 @@ fn encode_with_padding_multiple_writes() {
     }
     assert_eq!(
         &c.get_ref()[..],
-        encode_config("abcdefg", URL_SAFE).as_bytes()
+        URL_SAFE_ENGINE.encode("abcdefg").as_bytes()
     );
 }
 
@@ -122,7 +132,7 @@ fn encode_with_padding_multiple_writes() {
 fn finish_writes_extra_byte() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, URL_SAFE);
+        let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
 
         assert_eq!(6, enc.write(b"abcdef").unwrap());
 
@@ -134,7 +144,7 @@ fn finish_writes_extra_byte() {
     }
     assert_eq!(
         &c.get_ref()[..],
-        encode_config("abcdefg", URL_SAFE).as_bytes()
+        URL_SAFE_ENGINE.encode("abcdefg").as_bytes()
     );
 }
 
@@ -142,17 +152,14 @@ fn finish_writes_extra_byte() {
 fn write_partial_chunk_encodes_partial_chunk() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         // nothing encoded yet
         assert_eq!(2, enc.write(b"ab").unwrap());
         // encoded here
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("ab", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("ab").as_bytes());
     assert_eq!(3, c.get_ref().len());
 }
 
@@ -160,15 +167,12 @@ fn write_partial_chunk_encodes_partial_chunk() {
 fn write_1_chunk_encodes_complete_chunk() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         assert_eq!(3, enc.write(b"abc").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abc", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
     assert_eq!(4, c.get_ref().len());
 }
 
@@ -176,16 +180,13 @@ fn write_1_chunk_encodes_complete_chunk() {
 fn write_1_chunk_and_partial_encodes_only_complete_chunk() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
-        // "d" not written
+        // "d" not consumed since it's not a full chunk
         assert_eq!(3, enc.write(b"abcd").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abc", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
     assert_eq!(4, c.get_ref().len());
 }
 
@@ -193,16 +194,13 @@ fn write_1_chunk_and_partial_encodes_only_complete_chunk() {
 fn write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         assert_eq!(1, enc.write(b"a").unwrap());
         assert_eq!(2, enc.write(b"bc").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abc", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
     assert_eq!(4, c.get_ref().len());
 }
 
@@ -211,17 +209,14 @@ fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_en
 ) {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         assert_eq!(1, enc.write(b"a").unwrap());
         // doesn't consume "d"
         assert_eq!(2, enc.write(b"bcd").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abc", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
     assert_eq!(4, c.get_ref().len());
 }
 
@@ -229,17 +224,14 @@ fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_en
 fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         assert_eq!(1, enc.write(b"a").unwrap());
         // completes partial chunk, and another chunk
         assert_eq!(5, enc.write(b"bcdef").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abcdef", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes());
     assert_eq!(8, c.get_ref().len());
 }
 
@@ -248,7 +240,7 @@ fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_par
 ) {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
 
         assert_eq!(1, enc.write(b"a").unwrap());
         // completes partial chunk, and another chunk, with one more partial chunk that's not
@@ -256,10 +248,7 @@ fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_par
         assert_eq!(5, enc.write(b"bcdefe").unwrap());
         let _ = enc.finish().unwrap();
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("abcdef", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes());
     assert_eq!(8, c.get_ref().len());
 }
 
@@ -267,13 +256,10 @@ fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_par
 fn drop_calls_finish_for_you() {
     let mut c = Cursor::new(Vec::new());
     {
-        let mut enc = EncoderWriter::new(&mut c, STANDARD_NO_PAD);
+        let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
         assert_eq!(1, enc.write(b"a").unwrap());
     }
-    assert_eq!(
-        &c.get_ref()[..],
-        encode_config("a", STANDARD_NO_PAD).as_bytes()
-    );
+    assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("a").as_bytes());
     assert_eq!(2, c.get_ref().len());
 }
 
@@ -295,11 +281,11 @@ fn every_possible_split_of_input() {
             orig_data.push(rng.gen());
         }
 
-        let config = random_config(&mut rng);
-        encode_config_buf(&orig_data, config, &mut normal_encoded);
+        let engine = random_engine(&mut rng);
+        engine.encode_string(&orig_data, &mut normal_encoded);
 
         {
-            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
+            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine);
             // Write the first i bytes, then the rest
             stream_encoder.write_all(&orig_data[0..i]).unwrap();
             stream_encoder.write_all(&orig_data[i..]).unwrap();
@@ -312,12 +298,12 @@ fn every_possible_split_of_input() {
 #[test]
 fn encode_random_config_matches_normal_encode_reasonable_input_len() {
     // choose up to 2 * buf size, so ~half the time it'll use a full buffer
-    do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2)
+    do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2);
 }
 
 #[test]
 fn encode_random_config_matches_normal_encode_tiny_input_len() {
-    do_encode_random_config_matches_normal_encode(10)
+    do_encode_random_config_matches_normal_encode(10);
 }
 
 #[test]
@@ -332,14 +318,14 @@ fn retrying_writes_that_error_with_interrupted_works() {
         stream_encoded.clear();
         normal_encoded.clear();
 
-        let orig_len: usize = rng.gen_range(100, 20_000);
+        let orig_len: usize = rng.gen_range(100..20_000);
         for _ in 0..orig_len {
             orig_data.push(rng.gen());
         }
 
         // encode the normal way
-        let config = random_config(&mut rng);
-        encode_config_buf(&orig_data, config, &mut normal_encoded);
+        let engine = random_engine(&mut rng);
+        engine.encode_string(&orig_data, &mut normal_encoded);
 
         // encode via the stream encoder
         {
@@ -350,12 +336,12 @@ fn retrying_writes_that_error_with_interrupted_works() {
                 fraction: 0.8,
             };
 
-            let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, config);
+            let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, &engine);
             let mut bytes_consumed = 0;
             while bytes_consumed < orig_len {
                 // use short inputs since we want to use `extra` a lot as that's what needs rollback
                 // when errors occur
-                let input_len: usize = cmp::min(rng.gen_range(0, 10), orig_len - bytes_consumed);
+                let input_len: usize = cmp::min(rng.gen_range(0..10), orig_len - bytes_consumed);
 
                 retry_interrupted_write_all(
                     &mut stream_encoder,
@@ -396,14 +382,14 @@ fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_
         stream_encoded.clear();
         normal_encoded.clear();
 
-        let orig_len: usize = rng.gen_range(100, 20_000);
+        let orig_len: usize = rng.gen_range(100..20_000);
         for _ in 0..orig_len {
             orig_data.push(rng.gen());
         }
 
         // encode the normal way
-        let config = random_config(&mut rng);
-        encode_config_buf(&orig_data, config, &mut normal_encoded);
+        let engine = random_engine(&mut rng);
+        engine.encode_string(&orig_data, &mut normal_encoded);
 
         // encode via the stream encoder
         {
@@ -415,11 +401,11 @@ fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_
                 no_interrupt_fraction: 0.1,
             };
 
-            let mut stream_encoder = EncoderWriter::new(&mut partial_writer, config);
+            let mut stream_encoder = EncoderWriter::new(&mut partial_writer, &engine);
             let mut bytes_consumed = 0;
             while bytes_consumed < orig_len {
                 // use at most medium-length inputs to exercise retry logic more aggressively
-                let input_len: usize = cmp::min(rng.gen_range(0, 100), orig_len - bytes_consumed);
+                let input_len: usize = cmp::min(rng.gen_range(0..100), orig_len - bytes_consumed);
 
                 let res =
                     stream_encoder.write(&orig_data[bytes_consumed..bytes_consumed + input_len]);
@@ -475,22 +461,22 @@ fn do_encode_random_config_matches_normal_encode(max_input_len: usize) {
         stream_encoded.clear();
         normal_encoded.clear();
 
-        let orig_len: usize = rng.gen_range(100, 20_000);
+        let orig_len: usize = rng.gen_range(100..20_000);
         for _ in 0..orig_len {
             orig_data.push(rng.gen());
         }
 
         // encode the normal way
-        let config = random_config(&mut rng);
-        encode_config_buf(&orig_data, config, &mut normal_encoded);
+        let engine = random_engine(&mut rng);
+        engine.encode_string(&orig_data, &mut normal_encoded);
 
         // encode via the stream encoder
         {
-            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, config);
+            let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine);
             let mut bytes_consumed = 0;
             while bytes_consumed < orig_len {
                 let input_len: usize =
-                    cmp::min(rng.gen_range(0, max_input_len), orig_len - bytes_consumed);
+                    cmp::min(rng.gen_range(0..max_input_len), orig_len - bytes_consumed);
 
                 // write a little bit of the data
                 stream_encoder
@@ -520,7 +506,7 @@ struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
 
 impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        if self.rng.gen_range(0.0, 1.0) <= self.fraction {
+        if self.rng.gen_range(0.0..1.0) <= self.fraction {
             return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
         }
 
@@ -528,7 +514,7 @@ impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> {
     }
 
     fn flush(&mut self) -> io::Result<()> {
-        if self.rng.gen_range(0.0, 1.0) <= self.fraction {
+        if self.rng.gen_range(0.0..1.0) <= self.fraction {
             return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
         }
 
@@ -548,17 +534,17 @@ struct PartialInterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
 
 impl<'a, W: Write, R: Rng> Write for PartialInterruptingWriter<'a, W, R> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        if self.rng.gen_range(0.0, 1.0) > self.no_interrupt_fraction {
+        if self.rng.gen_range(0.0..1.0) > self.no_interrupt_fraction {
             return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
         }
 
-        if self.rng.gen_range(0.0, 1.0) <= self.full_input_fraction || buf.len() == 0 {
+        if self.rng.gen_range(0.0..1.0) <= self.full_input_fraction || buf.is_empty() {
             // pass through the buf untouched
             self.w.write(buf)
         } else {
             // only use a prefix of it
             self.w
-                .write(&buf[0..(self.rng.gen_range(0, buf.len() - 1))])
+                .write(&buf[0..(self.rng.gen_range(0..(buf.len() - 1)))])
         }
     }
 

+ 5 - 2
zeroidc/vendor/base64/src/write/mod.rs

@@ -1,8 +1,11 @@
 //! Implementations of `io::Write` to transparently handle base64.
 mod encoder;
 mod encoder_string_writer;
-pub use self::encoder::EncoderWriter;
-pub use self::encoder_string_writer::EncoderStringWriter;
+
+pub use self::{
+    encoder::EncoderWriter,
+    encoder_string_writer::{EncoderStringWriter, StrConsumer},
+};
 
 #[cfg(test)]
 mod encoder_tests;

+ 5 - 50
zeroidc/vendor/base64/tests/encode.rs

@@ -1,44 +1,9 @@
-extern crate base64;
-
-use base64::*;
+use base64::{
+    alphabet::URL_SAFE, engine::general_purpose::PAD, engine::general_purpose::STANDARD, *,
+};
 
 fn compare_encode(expected: &str, target: &[u8]) {
-    assert_eq!(expected, encode(target));
-}
-
-#[test]
-fn encode_rfc4648_0() {
-    compare_encode("", b"");
-}
-
-#[test]
-fn encode_rfc4648_1() {
-    compare_encode("Zg==", b"f");
-}
-
-#[test]
-fn encode_rfc4648_2() {
-    compare_encode("Zm8=", b"fo");
-}
-
-#[test]
-fn encode_rfc4648_3() {
-    compare_encode("Zm9v", b"foo");
-}
-
-#[test]
-fn encode_rfc4648_4() {
-    compare_encode("Zm9vYg==", b"foob");
-}
-
-#[test]
-fn encode_rfc4648_5() {
-    compare_encode("Zm9vYmE=", b"fooba");
-}
-
-#[test]
-fn encode_rfc4648_6() {
-    compare_encode("Zm9vYmFy", b"foobar");
+    assert_eq!(expected, STANDARD.encode(target));
 }
 
 #[test]
@@ -90,16 +55,6 @@ fn encode_all_bytes_url() {
          -AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq\
          -wsbKztLW2t7i5uru8vb6_wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t_g4eLj5OXm5-jp6uvs7e7v8PHy\
          8_T19vf4-fr7_P3-_w==",
-        encode_config(&bytes, URL_SAFE)
-    );
-}
-
-#[test]
-fn encode_url_safe_without_padding() {
-    let encoded = encode_config(b"alice", URL_SAFE_NO_PAD);
-    assert_eq!(&encoded, "YWxpY2U");
-    assert_eq!(
-        String::from_utf8(decode(&encoded).unwrap()).unwrap(),
-        "alice"
+        &engine::GeneralPurpose::new(&URL_SAFE, PAD).encode(&bytes)
     );
 }

+ 48 - 81
zeroidc/vendor/base64/tests/tests.rs

@@ -1,18 +1,15 @@
-extern crate base64;
-extern crate rand;
-
-use rand::{FromEntropy, Rng};
+use rand::{Rng, SeedableRng};
 
+use base64::engine::{general_purpose::STANDARD, Engine};
 use base64::*;
 
-mod helpers;
-use self::helpers::*;
+use base64::engine::general_purpose::{GeneralPurpose, NO_PAD};
 
 // generate random contents of the specified length and test encode/decode roundtrip
-fn roundtrip_random(
+fn roundtrip_random<E: Engine>(
     byte_buf: &mut Vec<u8>,
     str_buf: &mut String,
-    config: Config,
+    engine: &E,
     byte_len: usize,
     approx_values_per_byte: u8,
     max_rounds: u64,
@@ -30,8 +27,8 @@ fn roundtrip_random(
             byte_buf.push(r.gen::<u8>());
         }
 
-        encode_config_buf(&byte_buf, config, str_buf);
-        decode_config_buf(&str_buf, config, &mut decode_buf).unwrap();
+        engine.encode_string(&byte_buf, str_buf);
+        engine.decode_vec(&str_buf, &mut decode_buf).unwrap();
 
         assert_eq!(byte_buf, &decode_buf);
     }
@@ -52,17 +49,13 @@ fn calculate_number_of_rounds(byte_len: usize, approx_values_per_byte: u8, max:
     prod
 }
 
-fn no_pad_config() -> Config {
-    Config::new(CharacterSet::Standard, false)
-}
-
 #[test]
 fn roundtrip_random_short_standard() {
     let mut byte_buf: Vec<u8> = Vec::new();
     let mut str_buf = String::new();
 
     for input_len in 0..40 {
-        roundtrip_random(&mut byte_buf, &mut str_buf, STANDARD, input_len, 4, 10000);
+        roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 10000);
     }
 }
 
@@ -72,7 +65,7 @@ fn roundtrip_random_with_fast_loop_standard() {
     let mut str_buf = String::new();
 
     for input_len in 40..100 {
-        roundtrip_random(&mut byte_buf, &mut str_buf, STANDARD, input_len, 4, 1000);
+        roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 1000);
     }
 }
 
@@ -81,15 +74,9 @@ fn roundtrip_random_short_no_padding() {
     let mut byte_buf: Vec<u8> = Vec::new();
     let mut str_buf = String::new();
 
+    let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
     for input_len in 0..40 {
-        roundtrip_random(
-            &mut byte_buf,
-            &mut str_buf,
-            no_pad_config(),
-            input_len,
-            4,
-            10000,
-        );
+        roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 10000);
     }
 }
 
@@ -98,15 +85,10 @@ fn roundtrip_random_no_padding() {
     let mut byte_buf: Vec<u8> = Vec::new();
     let mut str_buf = String::new();
 
+    let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
+
     for input_len in 40..100 {
-        roundtrip_random(
-            &mut byte_buf,
-            &mut str_buf,
-            no_pad_config(),
-            input_len,
-            4,
-            1000,
-        );
+        roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 1000);
     }
 }
 
@@ -120,13 +102,14 @@ fn roundtrip_decode_trailing_10_bytes() {
     // to handle that case.
 
     for num_quads in 0..25 {
-        let mut s: String = std::iter::repeat("ABCD").take(num_quads).collect();
+        let mut s: String = "ABCD".repeat(num_quads);
         s.push_str("EFGHIJKLZg");
 
-        let decoded = decode(&s).unwrap();
+        let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
+        let decoded = engine.decode(&s).unwrap();
         assert_eq!(num_quads * 3 + 7, decoded.len());
 
-        assert_eq!(s, encode_config(&decoded, STANDARD_NO_PAD));
+        assert_eq!(s, engine.encode(&decoded));
     }
 }
 
@@ -140,55 +123,39 @@ fn display_wrapper_matches_normal_encode() {
     bytes.push(255);
 
     assert_eq!(
-        encode(&bytes),
-        format!(
-            "{}",
-            base64::display::Base64Display::with_config(&bytes, STANDARD)
-        )
+        STANDARD.encode(&bytes),
+        format!("{}", display::Base64Display::new(&bytes, &STANDARD))
     );
 }
 
 #[test]
-fn because_we_can() {
-    compare_decode("alice", "YWxpY2U=");
-    compare_decode("alice", &encode(b"alice"));
-    compare_decode("alice", &encode(&decode(&encode(b"alice")).unwrap()));
-}
-
-#[test]
-fn encode_config_slice_can_use_inline_buffer() {
-    let mut buf: [u8; 22] = [0; 22];
-    let mut larger_buf: [u8; 24] = [0; 24];
-    let mut input: [u8; 16] = [0; 16];
-
-    let mut rng = rand::rngs::SmallRng::from_entropy();
-    for elt in &mut input {
-        *elt = rng.gen();
-    }
-
-    assert_eq!(22, encode_config_slice(&input, STANDARD_NO_PAD, &mut buf));
-    let decoded = decode_config(&buf, STANDARD_NO_PAD).unwrap();
-
-    assert_eq!(decoded, input);
-
-    // let's try it again with padding
-
-    assert_eq!(24, encode_config_slice(&input, STANDARD, &mut larger_buf));
-    let decoded = decode_config(&buf, STANDARD).unwrap();
-
-    assert_eq!(decoded, input);
-}
-
-#[test]
-#[should_panic(expected = "index 24 out of range for slice of length 22")]
-fn encode_config_slice_panics_when_buffer_too_small() {
-    let mut buf: [u8; 22] = [0; 22];
-    let mut input: [u8; 16] = [0; 16];
-
-    let mut rng = rand::rngs::SmallRng::from_entropy();
-    for elt in &mut input {
-        *elt = rng.gen();
+fn encode_engine_slice_error_when_buffer_too_small() {
+    for num_triples in 1..100 {
+        let input = "AAA".repeat(num_triples);
+        let mut vec = vec![0; (num_triples - 1) * 4];
+        assert_eq!(
+            EncodeSliceError::OutputSliceTooSmall,
+            STANDARD.encode_slice(&input, &mut vec).unwrap_err()
+        );
+        vec.push(0);
+        assert_eq!(
+            EncodeSliceError::OutputSliceTooSmall,
+            STANDARD.encode_slice(&input, &mut vec).unwrap_err()
+        );
+        vec.push(0);
+        assert_eq!(
+            EncodeSliceError::OutputSliceTooSmall,
+            STANDARD.encode_slice(&input, &mut vec).unwrap_err()
+        );
+        vec.push(0);
+        assert_eq!(
+            EncodeSliceError::OutputSliceTooSmall,
+            STANDARD.encode_slice(&input, &mut vec).unwrap_err()
+        );
+        vec.push(0);
+        assert_eq!(
+            num_triples * 4,
+            STANDARD.encode_slice(&input, &mut vec).unwrap()
+        );
     }
-
-    encode_config_slice(&input, STANDARD, &mut buf);
 }

文件差异内容过多而无法显示
+ 0 - 0
zeroidc/vendor/bytes/.cargo-checksum.json


+ 48 - 0
zeroidc/vendor/bytes/CHANGELOG.md

@@ -1,3 +1,51 @@
+# 1.3.0 (November 20, 2022)
+
+### Added
+
+- Rename and expose `BytesMut::spare_capacity_mut` (#572)
+- Implement native-endian get and put functions for `Buf` and `BufMut` (#576)
+
+### Fixed
+
+- Don't have important data in unused capacity when calling reserve (#563)
+
+### Documented
+
+- `Bytes::new` etc should return `Self` not `Bytes` (#568)
+
+# 1.2.1 (July 30, 2022)
+
+### Fixed
+
+- Fix unbounded memory growth when using `reserve` (#560)
+
+# 1.2.0 (July 19, 2022)
+
+### Added
+
+- Add `BytesMut::zeroed` (#517)
+- Implement `Extend<Bytes>` for `BytesMut` (#527)
+- Add conversion from `BytesMut` to `Vec<u8>` (#543, #554)
+- Add conversion from `Bytes` to `Vec<u8>` (#547)
+- Add `UninitSlice::as_uninit_slice_mut()` (#548)
+- Add const to `Bytes::{len,is_empty}` (#514)
+
+### Changed
+
+- Reuse vector in `BytesMut::reserve` (#539, #544)
+
+### Fixed
+
+- Make miri happy (#515, #523, #542, #545, #553)
+- Make tsan happy (#541)
+- Fix `remaining_mut()` on chain (#488)
+- Fix amortized asymptotics of `BytesMut` (#555)
+
+### Documented
+
+- Redraw layout diagram with box drawing characters (#539)
+- Clarify `BytesMut::unsplit` docs (#535)
+
 # 1.1.0 (August 25, 2021)
 
 ### Added

+ 26 - 10
zeroidc/vendor/bytes/Cargo.toml

@@ -3,36 +3,52 @@
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
 #
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
 
 [package]
 edition = "2018"
 name = "bytes"
-version = "1.1.0"
-authors = ["Carl Lerche <[email protected]>", "Sean McArthur <[email protected]>"]
+version = "1.3.0"
+authors = [
+    "Carl Lerche <[email protected]>",
+    "Sean McArthur <[email protected]>",
+]
 description = "Types and traits for working with bytes"
 readme = "README.md"
-keywords = ["buffers", "zero-copy", "io"]
-categories = ["network-programming", "data-structures"]
+keywords = [
+    "buffers",
+    "zero-copy",
+    "io",
+]
+categories = [
+    "network-programming",
+    "data-structures",
+]
 license = "MIT"
 repository = "https://github.com/tokio-rs/bytes"
+
 [package.metadata.docs.rs]
-rustdoc-args = ["--cfg", "docsrs"]
+rustdoc-args = [
+    "--cfg",
+    "docsrs",
+]
+
 [dependencies.serde]
 version = "1.0.60"
 features = ["alloc"]
 optional = true
 default-features = false
+
 [dev-dependencies.serde_test]
 version = "1.0"
 
 [features]
 default = ["std"]
 std = []
+
 [target."cfg(loom)".dev-dependencies.loom]
 version = "0.5"

+ 1 - 1
zeroidc/vendor/bytes/benches/buf.rs

@@ -46,7 +46,7 @@ impl TestBuf {
 }
 impl Buf for TestBuf {
     fn remaining(&self) -> usize {
-        return self.buf.len() - self.pos;
+        self.buf.len() - self.pos
     }
     fn advance(&mut self, cnt: usize) {
         self.pos += cnt;

+ 1 - 0
zeroidc/vendor/bytes/benches/bytes.rs

@@ -88,6 +88,7 @@ fn from_long_slice(b: &mut Bencher) {
 #[bench]
 fn slice_empty(b: &mut Bencher) {
     b.iter(|| {
+        // `clone` is to convert to ARC
         let b = Bytes::from(vec![17; 1024]).clone();
         for i in 0..1000 {
             test::black_box(b.slice(i % 100..i % 100));

+ 5 - 5
zeroidc/vendor/bytes/ci/miri.sh

@@ -1,11 +1,11 @@
 #!/bin/bash
 set -e
 
-MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
-echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
-rustup set profile minimal
-rustup default "$MIRI_NIGHTLY"
-rustup component add miri
+rustup toolchain install nightly --component miri
+rustup override set nightly
+cargo miri setup
+
+export MIRIFLAGS="-Zmiri-strict-provenance"
 
 cargo miri test
 cargo miri test --target mips64-unknown-linux-gnuabi64

+ 1 - 0
zeroidc/vendor/bytes/clippy.toml

@@ -0,0 +1 @@
+msrv = "1.39"

+ 318 - 0
zeroidc/vendor/bytes/src/buf/buf_impl.rs

@@ -354,6 +354,29 @@ pub trait Buf {
         buf_get_impl!(self, u16::from_le_bytes);
     }
 
+    /// Gets an unsigned 16 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09 hello",
+    ///     false => b"\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809, buf.get_u16_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u16_ne(&mut self) -> u16 {
+        buf_get_impl!(self, u16::from_ne_bytes);
+    }
+
     /// Gets a signed 16 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
@@ -394,6 +417,29 @@ pub trait Buf {
         buf_get_impl!(self, i16::from_le_bytes);
     }
 
+    /// Gets a signed 16 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09 hello",
+    ///     false => b"\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809, buf.get_i16_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i16_ne(&mut self) -> i16 {
+        buf_get_impl!(self, i16::from_ne_bytes);
+    }
+
     /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 4.
@@ -434,6 +480,29 @@ pub trait Buf {
         buf_get_impl!(self, u32::from_le_bytes);
     }
 
+    /// Gets an unsigned 32 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09\xA0\xA1 hello",
+    ///     false => b"\xA1\xA0\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809A0A1, buf.get_u32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u32_ne(&mut self) -> u32 {
+        buf_get_impl!(self, u32::from_ne_bytes);
+    }
+
     /// Gets a signed 32 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
@@ -474,6 +543,29 @@ pub trait Buf {
         buf_get_impl!(self, i32::from_le_bytes);
     }
 
+    /// Gets a signed 32 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x08\x09\xA0\xA1 hello",
+    ///     false => b"\xA1\xA0\x09\x08 hello",
+    /// };
+    /// assert_eq!(0x0809A0A1, buf.get_i32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i32_ne(&mut self) -> i32 {
+        buf_get_impl!(self, i32::from_ne_bytes);
+    }
+
     /// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
@@ -514,6 +606,29 @@ pub trait Buf {
         buf_get_impl!(self, u64::from_le_bytes);
     }
 
+    /// Gets an unsigned 64 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    ///     false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x0102030405060708, buf.get_u64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u64_ne(&mut self) -> u64 {
+        buf_get_impl!(self, u64::from_ne_bytes);
+    }
+
     /// Gets a signed 64 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 8.
@@ -554,6 +669,29 @@ pub trait Buf {
         buf_get_impl!(self, i64::from_le_bytes);
     }
 
+    /// Gets a signed 64 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+    ///     false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x0102030405060708, buf.get_i64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i64_ne(&mut self) -> i64 {
+        buf_get_impl!(self, i64::from_ne_bytes);
+    }
+
     /// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 16.
@@ -594,6 +732,29 @@ pub trait Buf {
         buf_get_impl!(self, u128::from_le_bytes);
     }
 
+    /// Gets an unsigned 128 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    ///     false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_u128_ne(&mut self) -> u128 {
+        buf_get_impl!(self, u128::from_ne_bytes);
+    }
+
     /// Gets a signed 128 bit integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by 16.
@@ -634,6 +795,29 @@ pub trait Buf {
         buf_get_impl!(self, i128::from_le_bytes);
     }
 
+    /// Gets a signed 128 bit integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+    ///     false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_i128_ne(&mut self) -> i128 {
+        buf_get_impl!(self, i128::from_ne_bytes);
+    }
+
     /// Gets an unsigned n-byte integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
@@ -674,6 +858,33 @@ pub trait Buf {
         buf_get_impl!(le => self, u64, nbytes);
     }
 
+    /// Gets an unsigned n-byte integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03 hello",
+    ///     false => b"\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x010203, buf.get_uint_ne(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+        if cfg!(target_endian = "big") {
+            self.get_uint(nbytes)
+        } else {
+            self.get_uint_le(nbytes)
+        }
+    }
+
     /// Gets a signed n-byte integer from `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
@@ -714,6 +925,33 @@ pub trait Buf {
         buf_get_impl!(le => self, i64, nbytes);
     }
 
+    /// Gets a signed n-byte integer from `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x01\x02\x03 hello",
+    ///     false => b"\x03\x02\x01 hello",
+    /// };
+    /// assert_eq!(0x010203, buf.get_int_ne(3));
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+        if cfg!(target_endian = "big") {
+            self.get_int(nbytes)
+        } else {
+            self.get_int_le(nbytes)
+        }
+    }
+
     /// Gets an IEEE754 single-precision (4 bytes) floating point number from
     /// `self` in big-endian byte order.
     ///
@@ -756,6 +994,30 @@ pub trait Buf {
         f32::from_bits(Self::get_u32_le(self))
     }
 
+    /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x3F\x99\x99\x9A hello",
+    ///     false => b"\x9A\x99\x99\x3F hello",
+    /// };
+    /// assert_eq!(1.2f32, buf.get_f32_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f32_ne(&mut self) -> f32 {
+        f32::from_bits(Self::get_u32_ne(self))
+    }
+
     /// Gets an IEEE754 double-precision (8 bytes) floating point number from
     /// `self` in big-endian byte order.
     ///
@@ -798,6 +1060,30 @@ pub trait Buf {
         f64::from_bits(Self::get_u64_le(self))
     }
 
+    /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Buf;
+    ///
+    /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+    ///     true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+    ///     false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+    /// };
+    /// assert_eq!(1.2f64, buf.get_f64_ne());
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining data in `self`.
+    fn get_f64_ne(&mut self) -> f64 {
+        f64::from_bits(Self::get_u64_ne(self))
+    }
+
     /// Consumes `len` bytes inside self and returns new instance of `Bytes`
     /// with this data.
     ///
@@ -948,6 +1234,10 @@ macro_rules! deref_forward_buf {
             (**self).get_u16_le()
         }
 
+        fn get_u16_ne(&mut self) -> u16 {
+            (**self).get_u16_ne()
+        }
+
         fn get_i16(&mut self) -> i16 {
             (**self).get_i16()
         }
@@ -956,6 +1246,10 @@ macro_rules! deref_forward_buf {
             (**self).get_i16_le()
         }
 
+        fn get_i16_ne(&mut self) -> i16 {
+            (**self).get_i16_ne()
+        }
+
         fn get_u32(&mut self) -> u32 {
             (**self).get_u32()
         }
@@ -964,6 +1258,10 @@ macro_rules! deref_forward_buf {
             (**self).get_u32_le()
         }
 
+        fn get_u32_ne(&mut self) -> u32 {
+            (**self).get_u32_ne()
+        }
+
         fn get_i32(&mut self) -> i32 {
             (**self).get_i32()
         }
@@ -972,6 +1270,10 @@ macro_rules! deref_forward_buf {
             (**self).get_i32_le()
         }
 
+        fn get_i32_ne(&mut self) -> i32 {
+            (**self).get_i32_ne()
+        }
+
         fn get_u64(&mut self) -> u64 {
             (**self).get_u64()
         }
@@ -980,6 +1282,10 @@ macro_rules! deref_forward_buf {
             (**self).get_u64_le()
         }
 
+        fn get_u64_ne(&mut self) -> u64 {
+            (**self).get_u64_ne()
+        }
+
         fn get_i64(&mut self) -> i64 {
             (**self).get_i64()
         }
@@ -988,6 +1294,10 @@ macro_rules! deref_forward_buf {
             (**self).get_i64_le()
         }
 
+        fn get_i64_ne(&mut self) -> i64 {
+            (**self).get_i64_ne()
+        }
+
         fn get_uint(&mut self, nbytes: usize) -> u64 {
             (**self).get_uint(nbytes)
         }
@@ -996,6 +1306,10 @@ macro_rules! deref_forward_buf {
             (**self).get_uint_le(nbytes)
         }
 
+        fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+            (**self).get_uint_ne(nbytes)
+        }
+
         fn get_int(&mut self, nbytes: usize) -> i64 {
             (**self).get_int(nbytes)
         }
@@ -1004,6 +1318,10 @@ macro_rules! deref_forward_buf {
             (**self).get_int_le(nbytes)
         }
 
+        fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+            (**self).get_int_ne(nbytes)
+        }
+
         fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
             (**self).copy_to_bytes(len)
         }

+ 350 - 0
zeroidc/vendor/bytes/src/buf/buf_mut.rs

@@ -56,6 +56,10 @@ pub unsafe trait BufMut {
     /// Implementations of `remaining_mut` should ensure that the return value
     /// does not change unless a call is made to `advance_mut` or any other
     /// function that is documented to change the `BufMut`'s current position.
+    ///
+    /// # Note
+    ///
+    /// `remaining_mut` may return value smaller than actual available space.
     fn remaining_mut(&self) -> usize;
 
     /// Advance the internal cursor of the BufMut
@@ -382,6 +386,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes an unsigned 16 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u16_ne(0x0809);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09");
+    /// } else {
+    ///     assert_eq!(buf, b"\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_u16_ne(&mut self, n: u16) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes a signed 16 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 2.
@@ -426,6 +456,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes a signed 16 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 2.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i16_ne(0x0809);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09");
+    /// } else {
+    ///     assert_eq!(buf, b"\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_i16_ne(&mut self, n: i16) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
@@ -470,6 +526,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes an unsigned 32 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u32_ne(0x0809A0A1);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// } else {
+    ///     assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_u32_ne(&mut self, n: u32) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes a signed 32 bit integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by 4.
@@ -514,6 +596,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes a signed 32 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i32_ne(0x0809A0A1);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// } else {
+    ///     assert_eq!(buf, b"\xA1\xA0\x09\x08");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_i32_ne(&mut self, n: i32) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 8.
@@ -558,6 +666,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes an unsigned 64 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u64_ne(0x0102030405060708);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// } else {
+    ///     assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_u64_ne(&mut self, n: u64) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes a signed 64 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 8.
@@ -602,6 +736,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes a signed 64 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i64_ne(0x0102030405060708);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+    /// } else {
+    ///     assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_i64_ne(&mut self, n: i64) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 16.
@@ -646,6 +806,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes an unsigned 128 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_u128_ne(0x01020304050607080910111213141516);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// } else {
+    ///     assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_u128_ne(&mut self, n: u128) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes a signed 128 bit integer to `self` in the big-endian byte order.
     ///
     /// The current position is advanced by 16.
@@ -690,6 +876,32 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes())
     }
 
+    /// Writes a signed 128 bit integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 16.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_i128_ne(0x01020304050607080910111213141516);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+    /// } else {
+    ///     assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_i128_ne(&mut self, n: i128) {
+        self.put_slice(&n.to_ne_bytes())
+    }
+
     /// Writes an unsigned n-byte integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
@@ -734,6 +946,36 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes()[0..nbytes]);
     }
 
+    /// Writes an unsigned n-byte integer to `self` in the native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_uint_ne(0x010203, 3);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03");
+    /// } else {
+    ///     assert_eq!(buf, b"\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_uint_ne(&mut self, n: u64, nbytes: usize) {
+        if cfg!(target_endian = "big") {
+            self.put_uint(n, nbytes)
+        } else {
+            self.put_uint_le(n, nbytes)
+        }
+    }
+
     /// Writes low `nbytes` of a signed integer to `self` in big-endian byte order.
     ///
     /// The current position is advanced by `nbytes`.
@@ -778,6 +1020,36 @@ pub unsafe trait BufMut {
         self.put_slice(&n.to_le_bytes()[0..nbytes]);
     }
 
+    /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by `nbytes`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_int_ne(0x010203, 3);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x01\x02\x03");
+    /// } else {
+    ///     assert_eq!(buf, b"\x03\x02\x01");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self` or if `nbytes` is greater than 8.
+    fn put_int_ne(&mut self, n: i64, nbytes: usize) {
+        if cfg!(target_endian = "big") {
+            self.put_int(n, nbytes)
+        } else {
+            self.put_int_le(n, nbytes)
+        }
+    }
+
     /// Writes  an IEEE754 single-precision (4 bytes) floating point number to
     /// `self` in big-endian byte order.
     ///
@@ -824,6 +1096,33 @@ pub unsafe trait BufMut {
         self.put_u32_le(n.to_bits());
     }
 
+    /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 4.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f32_ne(1.2f32);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x3F\x99\x99\x9A");
+    /// } else {
+    ///     assert_eq!(buf, b"\x9A\x99\x99\x3F");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_f32_ne(&mut self, n: f32) {
+        self.put_u32_ne(n.to_bits());
+    }
+
     /// Writes  an IEEE754 double-precision (8 bytes) floating point number to
     /// `self` in big-endian byte order.
     ///
@@ -870,6 +1169,33 @@ pub unsafe trait BufMut {
         self.put_u64_le(n.to_bits());
     }
 
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in native-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64_ne(1.2f64);
+    /// if cfg!(target_endian = "big") {
+    ///     assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+    /// } else {
+    ///     assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_f64_ne(&mut self, n: f64) {
+        self.put_u64_ne(n.to_bits());
+    }
+
     /// Creates an adaptor which can write at most `limit` bytes to `self`.
     ///
     /// # Examples
@@ -982,6 +1308,10 @@ macro_rules! deref_forward_bufmut {
             (**self).put_u16_le(n)
         }
 
+        fn put_u16_ne(&mut self, n: u16) {
+            (**self).put_u16_ne(n)
+        }
+
         fn put_i16(&mut self, n: i16) {
             (**self).put_i16(n)
         }
@@ -990,6 +1320,10 @@ macro_rules! deref_forward_bufmut {
             (**self).put_i16_le(n)
         }
 
+        fn put_i16_ne(&mut self, n: i16) {
+            (**self).put_i16_ne(n)
+        }
+
         fn put_u32(&mut self, n: u32) {
             (**self).put_u32(n)
         }
@@ -998,6 +1332,10 @@ macro_rules! deref_forward_bufmut {
             (**self).put_u32_le(n)
         }
 
+        fn put_u32_ne(&mut self, n: u32) {
+            (**self).put_u32_ne(n)
+        }
+
         fn put_i32(&mut self, n: i32) {
             (**self).put_i32(n)
         }
@@ -1006,6 +1344,10 @@ macro_rules! deref_forward_bufmut {
             (**self).put_i32_le(n)
         }
 
+        fn put_i32_ne(&mut self, n: i32) {
+            (**self).put_i32_ne(n)
+        }
+
         fn put_u64(&mut self, n: u64) {
             (**self).put_u64(n)
         }
@@ -1014,6 +1356,10 @@ macro_rules! deref_forward_bufmut {
             (**self).put_u64_le(n)
         }
 
+        fn put_u64_ne(&mut self, n: u64) {
+            (**self).put_u64_ne(n)
+        }
+
         fn put_i64(&mut self, n: i64) {
             (**self).put_i64(n)
         }
@@ -1021,6 +1367,10 @@ macro_rules! deref_forward_bufmut {
         fn put_i64_le(&mut self, n: i64) {
             (**self).put_i64_le(n)
         }
+
+        fn put_i64_ne(&mut self, n: i64) {
+            (**self).put_i64_ne(n)
+        }
     };
 }
 

+ 1 - 2
zeroidc/vendor/bytes/src/buf/chain.rs

@@ -198,8 +198,7 @@ where
     fn remaining_mut(&self) -> usize {
         self.a
             .remaining_mut()
-            .checked_add(self.b.remaining_mut())
-            .unwrap()
+            .saturating_add(self.b.remaining_mut())
     }
 
     fn chunk_mut(&mut self) -> &mut UninitSlice {

+ 31 - 1
zeroidc/vendor/bytes/src/buf/uninit_slice.rs

@@ -22,6 +22,10 @@ use core::ops::{
 pub struct UninitSlice([MaybeUninit<u8>]);
 
 impl UninitSlice {
+    pub(crate) fn from_slice(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice {
+        unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+    }
+
     /// Create a `&mut UninitSlice` from a pointer and a length.
     ///
     /// # Safety
@@ -44,7 +48,7 @@ impl UninitSlice {
     pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice {
         let maybe_init: &mut [MaybeUninit<u8>] =
             core::slice::from_raw_parts_mut(ptr as *mut _, len);
-        &mut *(maybe_init as *mut [MaybeUninit<u8>] as *mut UninitSlice)
+        Self::from_slice(maybe_init)
     }
 
     /// Write a single byte at the specified offset.
@@ -124,6 +128,32 @@ impl UninitSlice {
         self.0.as_mut_ptr() as *mut _
     }
 
+    /// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer.
+    ///
+    /// # Safety
+    ///
+    /// The caller **must not** read from the referenced memory and **must not** write
+    /// **uninitialized** bytes to the slice either. This is because the `BufMut` implementation
+    /// that created the `UninitSlice` knows which parts are initialized. Writing uninitialized
+    /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined
+    /// behavior.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut data = [0, 1, 2];
+    /// let mut slice = &mut data[..];
+    /// unsafe {
+    ///     let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
+    /// };
+    /// ```
+    #[inline]
+    pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit<u8>] {
+        &mut *(self as *mut _ as *mut [MaybeUninit<u8>])
+    }
+
     /// Returns the number of bytes in the slice.
     ///
     /// # Examples

+ 183 - 51
zeroidc/vendor/bytes/src/bytes.rs

@@ -2,12 +2,18 @@ use core::iter::FromIterator;
 use core::ops::{Deref, RangeBounds};
 use core::{cmp, fmt, hash, mem, ptr, slice, usize};
 
-use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};
+use alloc::{
+    alloc::{dealloc, Layout},
+    borrow::Borrow,
+    boxed::Box,
+    string::String,
+    vec::Vec,
+};
 
 use crate::buf::IntoIter;
 #[allow(unused)]
 use crate::loom::sync::atomic::AtomicMut;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 use crate::Buf;
 
 /// A cheaply cloneable and sliceable chunk of contiguous memory.
@@ -55,7 +61,7 @@ use crate::Buf;
 /// # Sharing
 ///
 /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
-/// how sharing/cloneing is implemented in detail.
+/// how sharing/cloning is implemented in detail.
 /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
 /// cloning the backing storage in order to share it behind between multiple
 /// `Bytes` instances.
@@ -78,18 +84,18 @@ use crate::Buf;
 ///
 /// ```text
 ///
-///    Arc ptrs                   +---------+
-///    ________________________ / | Bytes 2 |
-///   /                           +---------+
-///  /          +-----------+     |         |
-/// |_________/ |  Bytes 1  |     |         |
-/// |           +-----------+     |         |
+///    Arc ptrs                   ┌─────────┐
+///    ________________________ / │ Bytes 2 │
+///   /                           └─────────┘
+///  /          ┌───────────┐     |         |
+/// |_________/ │  Bytes 1  │     |         |
+/// |           └───────────┘     |         |
 /// |           |           | ___/ data     | tail
 /// |      data |      tail |/              |
 /// v           v           v               v
-/// +-----+---------------------------------+-----+
-/// | Arc |     |           |               |     |
-/// +-----+---------------------------------+-----+
+/// ┌─────┬─────┬───────────┬───────────────┬─────┐
+/// │ Arc │     │           │               │     │
+/// └─────┴─────┴───────────┴───────────────┴─────┘
 /// ```
 pub struct Bytes {
     ptr: *const u8,
@@ -103,6 +109,10 @@ pub(crate) struct Vtable {
     /// fn(data, ptr, len)
     pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
     /// fn(data, ptr, len)
+    ///
+    /// takes `Bytes` to value
+    pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+    /// fn(data, ptr, len)
     pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
 }
 
@@ -121,7 +131,7 @@ impl Bytes {
     /// ```
     #[inline]
     #[cfg(not(all(loom, test)))]
-    pub const fn new() -> Bytes {
+    pub const fn new() -> Self {
         // Make it a named const to work around
         // "unsizing casts are not allowed in const fn"
         const EMPTY: &[u8] = &[];
@@ -129,7 +139,7 @@ impl Bytes {
     }
 
     #[cfg(all(loom, test))]
-    pub fn new() -> Bytes {
+    pub fn new() -> Self {
         const EMPTY: &[u8] = &[];
         Bytes::from_static(EMPTY)
     }
@@ -149,7 +159,7 @@ impl Bytes {
     /// ```
     #[inline]
     #[cfg(not(all(loom, test)))]
-    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
+    pub const fn from_static(bytes: &'static [u8]) -> Self {
         Bytes {
             ptr: bytes.as_ptr(),
             len: bytes.len(),
@@ -159,7 +169,7 @@ impl Bytes {
     }
 
     #[cfg(all(loom, test))]
-    pub fn from_static(bytes: &'static [u8]) -> Bytes {
+    pub fn from_static(bytes: &'static [u8]) -> Self {
         Bytes {
             ptr: bytes.as_ptr(),
             len: bytes.len(),
@@ -179,7 +189,7 @@ impl Bytes {
     /// assert_eq!(b.len(), 5);
     /// ```
     #[inline]
-    pub fn len(&self) -> usize {
+    pub const fn len(&self) -> usize {
         self.len
     }
 
@@ -194,7 +204,7 @@ impl Bytes {
     /// assert!(b.is_empty());
     /// ```
     #[inline]
-    pub fn is_empty(&self) -> bool {
+    pub const fn is_empty(&self) -> bool {
         self.len == 0
     }
 
@@ -225,7 +235,7 @@ impl Bytes {
     ///
     /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
     /// will panic.
-    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
+    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
         use core::ops::Bound;
 
         let len = self.len();
@@ -262,7 +272,7 @@ impl Bytes {
         let mut ret = self.clone();
 
         ret.len = end - begin;
-        ret.ptr = unsafe { ret.ptr.offset(begin as isize) };
+        ret.ptr = unsafe { ret.ptr.add(begin) };
 
         ret
     }
@@ -292,7 +302,7 @@ impl Bytes {
     ///
     /// Requires that the given `sub` slice is in fact contained within the
     /// `Bytes` buffer; otherwise this function will panic.
-    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
+    pub fn slice_ref(&self, subset: &[u8]) -> Self {
         // Empty slice and empty Bytes may have their pointers reset
         // so explicitly allow empty slice to be a subslice of any slice.
         if subset.is_empty() {
@@ -308,15 +318,15 @@ impl Bytes {
         assert!(
             sub_p >= bytes_p,
             "subset pointer ({:p}) is smaller than self pointer ({:p})",
-            sub_p as *const u8,
-            bytes_p as *const u8,
+            subset.as_ptr(),
+            self.as_ptr(),
         );
         assert!(
             sub_p + sub_len <= bytes_p + bytes_len,
             "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
-            bytes_p as *const u8,
+            self.as_ptr(),
             bytes_len,
-            sub_p as *const u8,
+            subset.as_ptr(),
             sub_len,
         );
 
@@ -349,7 +359,7 @@ impl Bytes {
     ///
     /// Panics if `at > len`.
     #[must_use = "consider Bytes::truncate if you don't need the other half"]
-    pub fn split_off(&mut self, at: usize) -> Bytes {
+    pub fn split_off(&mut self, at: usize) -> Self {
         assert!(
             at <= self.len(),
             "split_off out of bounds: {:?} <= {:?}",
@@ -398,7 +408,7 @@ impl Bytes {
     ///
     /// Panics if `at > len`.
     #[must_use = "consider Bytes::advance if you don't need the other half"]
-    pub fn split_to(&mut self, at: usize) -> Bytes {
+    pub fn split_to(&mut self, at: usize) -> Self {
         assert!(
             at <= self.len(),
             "split_to out of bounds: {:?} <= {:?}",
@@ -501,7 +511,7 @@ impl Bytes {
         // should already be asserted, but debug assert for tests
         debug_assert!(self.len >= by, "internal: inc_start out of bounds");
         self.len -= by;
-        self.ptr = self.ptr.offset(by as isize);
+        self.ptr = self.ptr.add(by);
     }
 }
 
@@ -604,7 +614,7 @@ impl<'a> IntoIterator for &'a Bytes {
     type IntoIter = core::slice::Iter<'a, u8>;
 
     fn into_iter(self) -> Self::IntoIter {
-        self.as_slice().into_iter()
+        self.as_slice().iter()
     }
 }
 
@@ -686,7 +696,7 @@ impl PartialOrd<Bytes> for str {
 
 impl PartialEq<Vec<u8>> for Bytes {
     fn eq(&self, other: &Vec<u8>) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -710,7 +720,7 @@ impl PartialOrd<Bytes> for Vec<u8> {
 
 impl PartialEq<String> for Bytes {
     fn eq(&self, other: &String) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -815,18 +825,18 @@ impl From<Box<[u8]>> for Bytes {
         let ptr = Box::into_raw(slice) as *mut u8;
 
         if ptr as usize & 0x1 == 0 {
-            let data = ptr as usize | KIND_VEC;
+            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
             Bytes {
                 ptr,
                 len,
-                data: AtomicPtr::new(data as *mut _),
+                data: AtomicPtr::new(data.cast()),
                 vtable: &PROMOTABLE_EVEN_VTABLE,
             }
         } else {
             Bytes {
                 ptr,
                 len,
-                data: AtomicPtr::new(ptr as *mut _),
+                data: AtomicPtr::new(ptr.cast()),
                 vtable: &PROMOTABLE_ODD_VTABLE,
             }
         }
@@ -839,6 +849,13 @@ impl From<String> for Bytes {
     }
 }
 
+impl From<Bytes> for Vec<u8> {
+    fn from(bytes: Bytes) -> Vec<u8> {
+        let bytes = mem::ManuallyDrop::new(bytes);
+        unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
+    }
+}
+
 // ===== impl Vtable =====
 
 impl fmt::Debug for Vtable {
@@ -854,6 +871,7 @@ impl fmt::Debug for Vtable {
 
 const STATIC_VTABLE: Vtable = Vtable {
     clone: static_clone,
+    to_vec: static_to_vec,
     drop: static_drop,
 };
 
@@ -862,6 +880,11 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
     Bytes::from_static(slice)
 }
 
+unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    let slice = slice::from_raw_parts(ptr, len);
+    slice.to_vec()
+}
+
 unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
     // nothing to drop for &'static [u8]
 }
@@ -870,11 +893,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
 
 static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
     clone: promotable_even_clone,
+    to_vec: promotable_even_to_vec,
     drop: promotable_even_drop,
 };
 
 static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
     clone: promotable_odd_clone,
+    to_vec: promotable_odd_to_vec,
     drop: promotable_odd_drop,
 };
 
@@ -883,25 +908,57 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize
     let kind = shared as usize & KIND_MASK;
 
     if kind == KIND_ARC {
-        shallow_clone_arc(shared as _, ptr, len)
+        shallow_clone_arc(shared.cast(), ptr, len)
     } else {
         debug_assert_eq!(kind, KIND_VEC);
-        let buf = (shared as usize & !KIND_MASK) as *mut u8;
+        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
         shallow_clone_vec(data, shared, buf, ptr, len)
     }
 }
 
+unsafe fn promotable_to_vec(
+    data: &AtomicPtr<()>,
+    ptr: *const u8,
+    len: usize,
+    f: fn(*mut ()) -> *mut u8,
+) -> Vec<u8> {
+    let shared = data.load(Ordering::Acquire);
+    let kind = shared as usize & KIND_MASK;
+
+    if kind == KIND_ARC {
+        shared_to_vec_impl(shared.cast(), ptr, len)
+    } else {
+        // If Bytes holds a Vec, then the offset must be 0.
+        debug_assert_eq!(kind, KIND_VEC);
+
+        let buf = f(shared);
+
+        let cap = (ptr as usize - buf as usize) + len;
+
+        // Copy back buffer
+        ptr::copy(ptr, buf, len);
+
+        Vec::from_raw_parts(buf, len, cap)
+    }
+}
+
+unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    promotable_to_vec(data, ptr, len, |shared| {
+        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+    })
+}
+
 unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
     data.with_mut(|shared| {
         let shared = *shared;
         let kind = shared as usize & KIND_MASK;
 
         if kind == KIND_ARC {
-            release_shared(shared as *mut Shared);
+            release_shared(shared.cast());
         } else {
             debug_assert_eq!(kind, KIND_VEC);
-            let buf = (shared as usize & !KIND_MASK) as *mut u8;
-            drop(rebuild_boxed_slice(buf, ptr, len));
+            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+            free_boxed_slice(buf, ptr, len);
         }
     });
 }
@@ -914,38 +971,49 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize)
         shallow_clone_arc(shared as _, ptr, len)
     } else {
         debug_assert_eq!(kind, KIND_VEC);
-        shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
+        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
     }
 }
 
+unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    promotable_to_vec(data, ptr, len, |shared| shared.cast())
+}
+
 unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
     data.with_mut(|shared| {
         let shared = *shared;
         let kind = shared as usize & KIND_MASK;
 
         if kind == KIND_ARC {
-            release_shared(shared as *mut Shared);
+            release_shared(shared.cast());
         } else {
             debug_assert_eq!(kind, KIND_VEC);
 
-            drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
+            free_boxed_slice(shared.cast(), ptr, len);
         }
     });
 }
 
-unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
+unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
     let cap = (offset as usize - buf as usize) + len;
-    Box::from_raw(slice::from_raw_parts_mut(buf, cap))
+    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
 }
 
 // ===== impl SharedVtable =====
 
 struct Shared {
-    // holds vec for drop, but otherwise doesnt access it
-    _vec: Vec<u8>,
+    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
+    buf: *mut u8,
+    cap: usize,
     ref_cnt: AtomicUsize,
 }
 
+impl Drop for Shared {
+    fn drop(&mut self) {
+        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
+    }
+}
+
 // Assert that the alignment of `Shared` is divisible by 2.
 // This is a necessary invariant since we depend on allocating `Shared` a
 // shared object to implicitly carry the `KIND_ARC` flag in its pointer.
@@ -954,6 +1022,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm
 
 static SHARED_VTABLE: Vtable = Vtable {
     clone: shared_clone,
+    to_vec: shared_to_vec,
     drop: shared_drop,
 };
 
@@ -966,9 +1035,42 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte
     shallow_clone_arc(shared as _, ptr, len)
 }
 
+unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
+    // Check that the ref_cnt is 1 (unique).
+    //
+    // If it is unique, then it is set to 0 with AcqRel fence for the same
+    // reason in release_shared.
+    //
+    // Otherwise, we take the other branch and call release_shared.
+    if (*shared)
+        .ref_cnt
+        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
+        .is_ok()
+    {
+        let buf = (*shared).buf;
+        let cap = (*shared).cap;
+
+        // Deallocate Shared
+        drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));
+
+        // Copy back buffer
+        ptr::copy(ptr, buf, len);
+
+        Vec::from_raw_parts(buf, len, cap)
+    } else {
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        v
+    }
+}
+
+unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
 unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
     data.with_mut(|shared| {
-        release_shared(*shared as *mut Shared);
+        release_shared(shared.cast());
     });
 }
 
@@ -1006,9 +1108,9 @@ unsafe fn shallow_clone_vec(
     // updated and since the buffer hasn't been promoted to an
     // `Arc`, those three fields still are the components of the
     // vector.
-    let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
     let shared = Box::new(Shared {
-        _vec: vec,
+        buf,
+        cap: (offset as usize - buf as usize) + len,
         // Initialize refcount to 2. One for this reference, and one
         // for the new clone that will be returned from
         // `shallow_clone`.
@@ -1082,10 +1184,40 @@ unsafe fn release_shared(ptr: *mut Shared) {
     // > "acquire" operation before deleting the object.
     //
     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-    atomic::fence(Ordering::Acquire);
+    //
+    // Thread sanitizer does not support atomic fences. Use an atomic load
+    // instead.
+    (*ptr).ref_cnt.load(Ordering::Acquire);
 
     // Drop the data
-    Box::from_raw(ptr);
+    drop(Box::from_raw(ptr));
+}
+
+// Ideally we would always use this version of `ptr_map` since it is strict
+// provenance compatible, but it results in worse codegen. We will however still
+// use it on miri because it gives better diagnostics for people who test bytes
+// code with miri.
+//
+// See https://github.com/tokio-rs/bytes/pull/545 for more info.
+#[cfg(miri)]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+    F: FnOnce(usize) -> usize,
+{
+    let old_addr = ptr as usize;
+    let new_addr = f(old_addr);
+    let diff = new_addr.wrapping_sub(old_addr);
+    ptr.wrapping_add(diff)
+}
+
+#[cfg(not(miri))]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+    F: FnOnce(usize) -> usize,
+{
+    let old_addr = ptr as usize;
+    let new_addr = f(old_addr);
+    new_addr as *mut u8
 }
 
 // compile-fails

+ 272 - 58
zeroidc/vendor/bytes/src/bytes_mut.rs

@@ -1,5 +1,5 @@
 use core::iter::{FromIterator, Iterator};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
 use core::ops::{Deref, DerefMut};
 use core::ptr::{self, NonNull};
 use core::{cmp, fmt, hash, isize, slice, usize};
@@ -8,6 +8,7 @@ use alloc::{
     borrow::{Borrow, BorrowMut},
     boxed::Box,
     string::String,
+    vec,
     vec::Vec,
 };
 
@@ -15,7 +16,7 @@ use crate::buf::{IntoIter, UninitSlice};
 use crate::bytes::Vtable;
 #[allow(unused)]
 use crate::loom::sync::atomic::AtomicMut;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 use crate::{Buf, BufMut, Bytes};
 
 /// A unique reference to a contiguous slice of memory.
@@ -252,12 +253,28 @@ impl BytesMut {
 
             let ptr = self.ptr.as_ptr();
             let len = self.len;
-            let data = AtomicPtr::new(self.data as _);
+            let data = AtomicPtr::new(self.data.cast());
             mem::forget(self);
             unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
         }
     }
 
+    /// Creates a new `BytesMut`, which is initialized with zero.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let zeros = BytesMut::zeroed(42);
+    ///
+    /// assert_eq!(zeros.len(), 42);
+    /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+    /// ```
+    pub fn zeroed(len: usize) -> BytesMut {
+        BytesMut::from_vec(vec![0; len])
+    }
+
     /// Splits the bytes into two at the given index.
     ///
     /// Afterwards `self` contains elements `[0, at)`, and the returned
@@ -494,11 +511,20 @@ impl BytesMut {
     /// reallocations. A call to `reserve` may result in an allocation.
     ///
     /// Before allocating new buffer space, the function will attempt to reclaim
-    /// space in the existing buffer. If the current handle references a small
-    /// view in the original buffer and all other handles have been dropped,
-    /// and the requested capacity is less than or equal to the existing
-    /// buffer's capacity, then the current view will be copied to the front of
-    /// the buffer and the handle will take ownership of the full buffer.
+    /// space in the existing buffer. If the current handle references a view
+    /// into a larger original buffer, and all other handles referencing part
+    /// of the same original buffer have been dropped, then the current view
+    /// can be copied/shifted to the front of the buffer and the handle can take
+    /// ownership of the full buffer, provided that the full buffer is large
+    /// enough to fit the requested additional capacity.
+    ///
+    /// This optimization will only happen if shifting the data from the current
+    /// view to the front of the buffer is not too expensive in terms of the
+    /// (amortized) time required. The precise condition is subject to change;
+    /// as of now, the length of the data being shifted needs to be at least as
+    /// large as the distance that it's shifted by. If the current view is empty
+    /// and the original buffer is large enough to fit the requested additional
+    /// capacity, then reallocations will never happen.
     ///
     /// # Examples
     ///
@@ -562,17 +588,34 @@ impl BytesMut {
             // space.
             //
             // Otherwise, since backed by a vector, use `Vec::reserve`
+            //
+            // We need to make sure that this optimization does not kill the
+            // amortized runtimes of BytesMut's operations.
             unsafe {
                 let (off, prev) = self.get_vec_pos();
 
                 // Only reuse space if we can satisfy the requested additional space.
-                if self.capacity() - self.len() + off >= additional {
-                    // There's space - reuse it
+                //
+                // Also check if the value of `off` suggests that enough bytes
+                // have been read to account for the overhead of shifting all
+                // the data (in an amortized analysis).
+                // Hence the condition `off >= self.len()`.
+                //
+                // This condition also already implies that the buffer is going
+                // to be (at least) half-empty in the end; so we do not break
+                // the (amortized) runtime with future resizes of the underlying
+                // `Vec`.
+                //
+                // [For more details check issue #524, and PR #525.]
+                if self.capacity() - self.len() + off >= additional && off >= self.len() {
+                    // There's enough space, and it's not too much overhead:
+                    // reuse the space!
                     //
                     // Just move the pointer back to the start after copying
                     // data back.
                     let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
-                    ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
+                    // Since `off >= self.len()`, the two regions don't overlap.
+                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
                     self.ptr = vptr(base_ptr);
                     self.set_vec_pos(0, prev);
 
@@ -580,13 +623,14 @@ impl BytesMut {
                     // can gain capacity back.
                     self.cap += off;
                 } else {
-                    // No space - allocate more
+                    // Not enough space, or reusing might be too much overhead:
+                    // allocate more space!
                     let mut v =
                         ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
                     v.reserve(additional);
 
                     // Update the info
-                    self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
+                    self.ptr = vptr(v.as_mut_ptr().add(off));
                     self.len = v.len() - off;
                     self.cap = v.capacity() - off;
                 }
@@ -596,7 +640,7 @@ impl BytesMut {
         }
 
         debug_assert_eq!(kind, KIND_ARC);
-        let shared: *mut Shared = self.data as _;
+        let shared: *mut Shared = self.data;
 
         // Reserving involves abandoning the currently shared buffer and
         // allocating a new vector with the requested capacity.
@@ -619,29 +663,65 @@ impl BytesMut {
                 // sure that the vector has enough capacity.
                 let v = &mut (*shared).vec;
 
-                if v.capacity() >= new_cap {
-                    // The capacity is sufficient, reclaim the buffer
-                    let ptr = v.as_mut_ptr();
+                let v_capacity = v.capacity();
+                let ptr = v.as_mut_ptr();
 
-                    ptr::copy(self.ptr.as_ptr(), ptr, len);
+                let offset = offset_from(self.ptr.as_ptr(), ptr);
+
+                // Compare the condition in the `kind == KIND_VEC` case above
+                // for more details.
+                if v_capacity >= new_cap + offset {
+                    self.cap = new_cap;
+                    // no copy is necessary
+                } else if v_capacity >= new_cap && offset >= len {
+                    // The capacity is sufficient, and copying is not too much
+                    // overhead: reclaim the buffer!
+
+                    // `offset >= len` means: no overlap
+                    ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
 
                     self.ptr = vptr(ptr);
                     self.cap = v.capacity();
+                } else {
+                    // calculate offset
+                    let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
 
-                    return;
-                }
+                    // new_cap is calculated in terms of `BytesMut`, not the underlying
+                    // `Vec`, so it does not take the offset into account.
+                    //
+                    // Thus we have to manually add it here.
+                    new_cap = new_cap.checked_add(off).expect("overflow");
 
-                // The vector capacity is not sufficient. The reserve request is
-                // asking for more than the initial buffer capacity. Allocate more
-                // than requested if `new_cap` is not much bigger than the current
-                // capacity.
-                //
-                // There are some situations, using `reserve_exact` that the
-                // buffer capacity could be below `original_capacity`, so do a
-                // check.
-                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+                    // The vector capacity is not sufficient. The reserve request is
+                    // asking for more than the initial buffer capacity. Allocate more
+                    // than requested if `new_cap` is not much bigger than the current
+                    // capacity.
+                    //
+                    // There are some situations, using `reserve_exact` that the
+                    // buffer capacity could be below `original_capacity`, so do a
+                    // check.
+                    let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+                    new_cap = cmp::max(double, new_cap);
 
-                new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
+                    // No space - allocate more
+                    //
+                    // The length field of `Shared::vec` is not used by the `BytesMut`;
+                    // instead we use the `len` field in the `BytesMut` itself. However,
+                    // when calling `reserve`, it doesn't guarantee that data stored in
+                    // the unused capacity of the vector is copied over to the new
+                    // allocation, so we need to ensure that we don't have any data we
+                    // care about in the unused capacity before calling `reserve`.
+                    debug_assert!(off + len <= v.capacity());
+                    v.set_len(off + len);
+                    v.reserve(new_cap - v.len());
+
+                    // Update the info
+                    self.ptr = vptr(v.as_mut_ptr().add(off));
+                    self.cap = v.capacity() - off;
+                }
+
+                return;
             } else {
                 new_cap = cmp::max(new_cap, original_capacity);
             }
@@ -659,7 +739,7 @@ impl BytesMut {
 
         // Update self
         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
-        self.data = data as _;
+        self.data = invalid_ptr(data);
         self.ptr = vptr(v.as_mut_ptr());
         self.len = v.len();
         self.cap = v.capacity();
@@ -686,11 +766,11 @@ impl BytesMut {
         self.reserve(cnt);
 
         unsafe {
-            let dst = self.uninit_slice();
+            let dst = self.spare_capacity_mut();
             // Reserved above
             debug_assert!(dst.len() >= cnt);
 
-            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
+            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
         }
 
         unsafe {
@@ -700,10 +780,11 @@ impl BytesMut {
 
     /// Absorbs a `BytesMut` that was previously split off.
     ///
-    /// If the two `BytesMut` objects were previously contiguous, i.e., if
-    /// `other` was created by calling `split_off` on this `BytesMut`, then
-    /// this is an `O(1)` operation that just decreases a reference
-    /// count and sets a few indices. Otherwise this method degenerates to
+    /// If the two `BytesMut` objects were previously contiguous and not mutated
+    /// in a way that causes re-allocation i.e., if `other` was created by
+    /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+    /// that just decreases a reference count and sets a few indices.
+    /// Otherwise this method degenerates to
     /// `self.extend_from_slice(other.as_ref())`.
     ///
     /// # Examples
@@ -754,7 +835,7 @@ impl BytesMut {
             ptr,
             len,
             cap,
-            data: data as *mut _,
+            data: invalid_ptr(data),
         }
     }
 
@@ -801,7 +882,7 @@ impl BytesMut {
         // Updating the start of the view is setting `ptr` to point to the
         // new start and updating the `len` field to reflect the new length
         // of the view.
-        self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));
+        self.ptr = vptr(self.ptr.as_ptr().add(start));
 
         if self.len >= start {
             self.len -= start;
@@ -825,7 +906,7 @@ impl BytesMut {
             return Ok(());
         }
 
-        let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
+        let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
         if ptr == other.ptr.as_ptr()
             && self.kind() == KIND_ARC
             && other.kind() == KIND_ARC
@@ -875,7 +956,7 @@ impl BytesMut {
         // always succeed.
         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
 
-        self.data = shared as _;
+        self.data = shared;
     }
 
     /// Makes an exact shallow clone of `self`.
@@ -908,16 +989,45 @@ impl BytesMut {
         debug_assert_eq!(self.kind(), KIND_VEC);
         debug_assert!(pos <= MAX_VEC_POS);
 
-        self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
+        self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
     }
 
+    /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
+    ///
+    /// The returned slice can be used to fill the buffer with data (e.g. by
+    /// reading from a file) before marking the data as initialized using the
+    /// [`set_len`] method.
+    ///
+    /// [`set_len`]: BytesMut::set_len
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// // Allocate buffer big enough for 10 bytes.
+    /// let mut buf = BytesMut::with_capacity(10);
+    ///
+    /// // Fill in the first 3 elements.
+    /// let uninit = buf.spare_capacity_mut();
+    /// uninit[0].write(0);
+    /// uninit[1].write(1);
+    /// uninit[2].write(2);
+    ///
+    /// // Mark the first 3 bytes of the buffer as being initialized.
+    /// unsafe {
+    ///     buf.set_len(3);
+    /// }
+    ///
+    /// assert_eq!(&buf[..], &[0, 1, 2]);
+    /// ```
     #[inline]
-    fn uninit_slice(&mut self) -> &mut UninitSlice {
+    pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
         unsafe {
-            let ptr = self.ptr.as_ptr().offset(self.len as isize);
+            let ptr = self.ptr.as_ptr().add(self.len);
             let len = self.cap - self.len;
 
-            UninitSlice::from_raw_parts_mut(ptr, len)
+            slice::from_raw_parts_mut(ptr.cast(), len)
         }
     }
 }
@@ -934,7 +1044,7 @@ impl Drop for BytesMut {
                 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
             }
         } else if kind == KIND_ARC {
-            unsafe { release_shared(self.data as _) };
+            unsafe { release_shared(self.data) };
         }
     }
 }
@@ -991,7 +1101,7 @@ unsafe impl BufMut for BytesMut {
         if self.capacity() == self.len() {
             self.reserve(64);
         }
-        self.uninit_slice()
+        UninitSlice::from_slice(self.spare_capacity_mut())
     }
 
     // Specialize these methods so they can skip checking `remaining_mut`
@@ -1016,7 +1126,7 @@ unsafe impl BufMut for BytesMut {
     fn put_bytes(&mut self, val: u8, cnt: usize) {
         self.reserve(cnt);
         unsafe {
-            let dst = self.uninit_slice();
+            let dst = self.spare_capacity_mut();
             // Reserved above
             debug_assert!(dst.len() >= cnt);
 
@@ -1161,7 +1271,7 @@ impl<'a> IntoIterator for &'a BytesMut {
     type IntoIter = core::slice::Iter<'a, u8>;
 
     fn into_iter(self) -> Self::IntoIter {
-        self.as_ref().into_iter()
+        self.as_ref().iter()
     }
 }
 
@@ -1190,7 +1300,18 @@ impl<'a> Extend<&'a u8> for BytesMut {
     where
         T: IntoIterator<Item = &'a u8>,
     {
-        self.extend(iter.into_iter().map(|b| *b))
+        self.extend(iter.into_iter().copied())
+    }
+}
+
+impl Extend<Bytes> for BytesMut {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = Bytes>,
+    {
+        for bytes in iter {
+            self.extend_from_slice(&bytes)
+        }
     }
 }
 
@@ -1202,7 +1323,7 @@ impl FromIterator<u8> for BytesMut {
 
 impl<'a> FromIterator<&'a u8> for BytesMut {
     fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
-        BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
+        BytesMut::from_iter(into_iter.into_iter().copied())
     }
 }
 
@@ -1243,10 +1364,13 @@ unsafe fn release_shared(ptr: *mut Shared) {
     // > "acquire" operation before deleting the object.
     //
     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-    atomic::fence(Ordering::Acquire);
+    //
+    // Thread sanitizer does not support atomic fences. Use an atomic load
+    // instead.
+    (*ptr).ref_count.load(Ordering::Acquire);
 
     // Drop the data
-    Box::from_raw(ptr);
+    drop(Box::from_raw(ptr));
 }
 
 impl Shared {
@@ -1392,7 +1516,7 @@ impl PartialOrd<BytesMut> for str {
 
 impl PartialEq<Vec<u8>> for BytesMut {
     fn eq(&self, other: &Vec<u8>) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -1416,7 +1540,7 @@ impl PartialOrd<BytesMut> for Vec<u8> {
 
 impl PartialEq<String> for BytesMut {
     fn eq(&self, other: &String) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -1482,13 +1606,51 @@ impl PartialOrd<BytesMut> for &str {
 
 impl PartialEq<BytesMut> for Bytes {
     fn eq(&self, other: &BytesMut) -> bool {
-        &other[..] == &self[..]
+        other[..] == self[..]
     }
 }
 
 impl PartialEq<Bytes> for BytesMut {
     fn eq(&self, other: &Bytes) -> bool {
-        &other[..] == &self[..]
+        other[..] == self[..]
+    }
+}
+
+impl From<BytesMut> for Vec<u8> {
+    fn from(mut bytes: BytesMut) -> Self {
+        let kind = bytes.kind();
+
+        let mut vec = if kind == KIND_VEC {
+            unsafe {
+                let (off, _) = bytes.get_vec_pos();
+                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+            }
+        } else if kind == KIND_ARC {
+            let shared = bytes.data as *mut Shared;
+
+            if unsafe { (*shared).is_unique() } {
+                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+                unsafe { release_shared(shared) };
+
+                vec
+            } else {
+                return bytes.deref().to_vec();
+            }
+        } else {
+            return bytes.deref().to_vec();
+        };
+
+        let len = bytes.len;
+
+        unsafe {
+            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+            vec.set_len(len);
+        }
+
+        mem::forget(bytes);
+
+        vec
     }
 }
 
@@ -1501,6 +1663,35 @@ fn vptr(ptr: *mut u8) -> NonNull<u8> {
     }
 }
 
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but this fails on miri when strict
+/// provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+    debug_assert_eq!(ptr as usize, addr);
+    ptr.cast::<T>()
+}
+
+/// Precondition: dst >= original
+///
+/// The following line is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But due to min rust is 1.39 and it is only stablised
+/// in 1.47, we cannot use it.
+#[inline]
+fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
+    debug_assert!(dst >= original);
+
+    dst as usize - original as usize
+}
+
 unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
     let ptr = ptr.offset(-(off as isize));
     len += off;
@@ -1513,6 +1704,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize)
 
 static SHARED_VTABLE: Vtable = Vtable {
     clone: shared_v_clone,
+    to_vec: shared_v_to_vec,
     drop: shared_v_drop,
 };
 
@@ -1520,10 +1712,32 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
     let shared = data.load(Ordering::Relaxed) as *mut Shared;
     increment_shared(shared);
 
-    let data = AtomicPtr::new(shared as _);
+    let data = AtomicPtr::new(shared as *mut ());
     Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
 }
 
+unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+    let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+    if (*shared).is_unique() {
+        let shared = &mut *shared;
+
+        // Drop shared
+        let mut vec = mem::replace(&mut shared.vec, Vec::new());
+        release_shared(shared);
+
+        // Copy back buffer
+        ptr::copy(ptr, vec.as_mut_ptr(), len);
+        vec.set_len(len);
+
+        vec
+    } else {
+        let v = slice::from_raw_parts(ptr, len).to_vec();
+        release_shared(shared);
+        v
+    }
+}
+
 unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
     data.with_mut(|shared| {
         release_shared(*shared as *mut Shared);

+ 3 - 3
zeroidc/vendor/bytes/src/fmt/debug.rs

@@ -25,7 +25,7 @@ impl Debug for BytesRef<'_> {
             } else if b == b'\0' {
                 write!(f, "\\0")?;
             // ASCII printable
-            } else if b >= 0x20 && b < 0x7f {
+            } else if (0x20..0x7f).contains(&b) {
                 write!(f, "{}", b as char)?;
             } else {
                 write!(f, "\\x{:02x}", b)?;
@@ -38,12 +38,12 @@ impl Debug for BytesRef<'_> {
 
 impl Debug for Bytes {
     fn fmt(&self, f: &mut Formatter<'_>) -> Result {
-        Debug::fmt(&BytesRef(&self.as_ref()), f)
+        Debug::fmt(&BytesRef(self.as_ref()), f)
     }
 }
 
 impl Debug for BytesMut {
     fn fmt(&self, f: &mut Formatter<'_>) -> Result {
-        Debug::fmt(&BytesRef(&self.as_ref()), f)
+        Debug::fmt(&BytesRef(self.as_ref()), f)
     }
 }

+ 2 - 2
zeroidc/vendor/bytes/src/loom.rs

@@ -1,7 +1,7 @@
 #[cfg(not(all(test, loom)))]
 pub(crate) mod sync {
     pub(crate) mod atomic {
-        pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+        pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 
         pub(crate) trait AtomicMut<T> {
             fn with_mut<F, R>(&mut self, f: F) -> R
@@ -23,7 +23,7 @@ pub(crate) mod sync {
 #[cfg(all(test, loom))]
 pub(crate) mod sync {
     pub(crate) mod atomic {
-        pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+        pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 
         pub(crate) trait AtomicMut<T> {}
     }

+ 168 - 7
zeroidc/vendor/bytes/tests/test_bytes.rs

@@ -4,8 +4,8 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
 
 use std::usize;
 
-const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
-const SHORT: &'static [u8] = b"hello world";
+const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb";
+const SHORT: &[u8] = b"hello world";
 
 fn is_sync<T: Sync>() {}
 fn is_send<T: Send>() {}
@@ -411,8 +411,8 @@ fn freeze_after_split_off() {
 fn fns_defined_for_bytes_mut() {
     let mut bytes = BytesMut::from(&b"hello world"[..]);
 
-    bytes.as_ptr();
-    bytes.as_mut_ptr();
+    let _ = bytes.as_ptr();
+    let _ = bytes.as_mut_ptr();
 
     // Iterator
     let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
@@ -443,7 +443,7 @@ fn reserve_growth() {
     let _ = bytes.split();
 
     bytes.reserve(65);
-    assert_eq!(bytes.capacity(), 128);
+    assert_eq!(bytes.capacity(), 117);
 }
 
 #[test]
@@ -515,6 +515,34 @@ fn reserve_in_arc_unique_doubles() {
     assert_eq!(2000, bytes.capacity());
 }
 
+#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_split() {
+    let mut bytes = BytesMut::from(LONG);
+    let orig_capacity = bytes.capacity();
+    drop(bytes.split_off(LONG.len() / 2));
+
+    // now bytes is Arc and refcount == 1
+
+    let new_capacity = bytes.capacity();
+    bytes.reserve(orig_capacity - new_capacity);
+    assert_eq!(bytes.capacity(), orig_capacity);
+}
+
+#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_multiple_splits() {
+    let mut bytes = BytesMut::from(LONG);
+    let orig_capacity = bytes.capacity();
+    for _ in 0..10 {
+        drop(bytes.split_off(LONG.len() / 2));
+
+        // now bytes is Arc and refcount == 1
+
+        let new_capacity = bytes.capacity();
+        bytes.reserve(orig_capacity - new_capacity);
+    }
+    assert_eq!(bytes.capacity(), orig_capacity);
+}
+
 #[test]
 fn reserve_in_arc_nonunique_does_not_overallocate() {
     let mut bytes = BytesMut::with_capacity(1000);
@@ -527,6 +555,25 @@ fn reserve_in_arc_nonunique_does_not_overallocate() {
     assert_eq!(2001, bytes.capacity());
 }
 
+/// This function tests `BytesMut::reserve_inner`, where `BytesMut` holds
+/// a unique reference to the shared vector and decide to reuse it
+/// by reallocating the `Vec`.
+#[test]
+fn reserve_shared_reuse() {
+    let mut bytes = BytesMut::with_capacity(1000);
+    bytes.put_slice(b"Hello, World!");
+    drop(bytes.split());
+
+    bytes.put_slice(b"!123ex123,sadchELLO,_wORLD!");
+    // Use split_off so that v.capacity() - self.cap != off
+    drop(bytes.split_off(9));
+    assert_eq!(&*bytes, b"!123ex123");
+
+    bytes.reserve(2000);
+    assert_eq!(&*bytes, b"!123ex123");
+    assert_eq!(bytes.capacity(), 2009);
+}
+
 #[test]
 fn extend_mut() {
     let mut bytes = BytesMut::with_capacity(0);
@@ -544,6 +591,13 @@ fn extend_from_slice_mut() {
     }
 }
 
+#[test]
+fn extend_mut_from_bytes() {
+    let mut bytes = BytesMut::with_capacity(0);
+    bytes.extend([Bytes::from(LONG)]);
+    assert_eq!(*bytes, LONG[..]);
+}
+
 #[test]
 fn extend_mut_without_size_hint() {
     let mut bytes = BytesMut::with_capacity(0);
@@ -874,7 +928,7 @@ fn from_iter_no_size_hint() {
 
 fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
     let slice = &(bytes.as_ref()[start..end]);
-    let sub = bytes.slice_ref(&slice);
+    let sub = bytes.slice_ref(slice);
     assert_eq!(&sub[..], expected);
 }
 
@@ -894,7 +948,7 @@ fn slice_ref_empty() {
     let bytes = Bytes::from(&b""[..]);
     let slice = &(bytes.as_ref()[0..0]);
 
-    let sub = bytes.slice_ref(&slice);
+    let sub = bytes.slice_ref(slice);
     assert_eq!(&sub[..], b"");
 }
 
@@ -1002,3 +1056,110 @@ fn box_slice_empty() {
     let b = Bytes::from(empty);
     assert!(b.is_empty());
 }
+
+#[test]
+fn bytes_into_vec() {
+    // Test kind == KIND_VEC
+    let content = b"helloworld";
+
+    let mut bytes = BytesMut::new();
+    bytes.put_slice(content);
+
+    let vec: Vec<u8> = bytes.into();
+    assert_eq!(&vec, content);
+
+    // Test kind == KIND_ARC, shared.is_unique() == True
+    let mut bytes = BytesMut::new();
+    bytes.put_slice(b"abcdewe23");
+    bytes.put_slice(content);
+
+    // Overwrite the bytes to make sure only one reference to the underlying
+    // Vec exists.
+    bytes = bytes.split_off(9);
+
+    let vec: Vec<u8> = bytes.into();
+    assert_eq!(&vec, content);
+
+    // Test kind == KIND_ARC, shared.is_unique() == False
+    let prefix = b"abcdewe23";
+
+    let mut bytes = BytesMut::new();
+    bytes.put_slice(prefix);
+    bytes.put_slice(content);
+
+    let vec: Vec<u8> = bytes.split_off(prefix.len()).into();
+    assert_eq!(&vec, content);
+
+    let vec: Vec<u8> = bytes.into();
+    assert_eq!(&vec, prefix);
+}
+
+#[test]
+fn test_bytes_into_vec() {
+    // Test STATIC_VTABLE.to_vec
+    let bs = b"1b23exfcz3r";
+    let vec: Vec<u8> = Bytes::from_static(bs).into();
+    assert_eq!(&*vec, bs);
+
+    // Test bytes_mut.SHARED_VTABLE.to_vec impl
+    eprintln!("1");
+    let mut bytes_mut: BytesMut = bs[..].into();
+
+    // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE
+    eprintln!("2");
+    drop(bytes_mut.split_off(bs.len()));
+
+    eprintln!("3");
+    let b1 = bytes_mut.freeze();
+    eprintln!("4");
+    let b2 = b1.clone();
+
+    eprintln!("{:#?}", (&*b1).as_ptr());
+
+    // shared.is_unique() = False
+    eprintln!("5");
+    assert_eq!(&*Vec::from(b2), bs);
+
+    // shared.is_unique() = True
+    eprintln!("6");
+    assert_eq!(&*Vec::from(b1), bs);
+
+    // Test bytes_mut.SHARED_VTABLE.to_vec impl where offset != 0
+    let mut bytes_mut1: BytesMut = bs[..].into();
+    let bytes_mut2 = bytes_mut1.split_off(9);
+
+    let b1 = bytes_mut1.freeze();
+    let b2 = bytes_mut2.freeze();
+
+    assert_eq!(Vec::from(b2), bs[9..]);
+    assert_eq!(Vec::from(b1), bs[..9]);
+}
+
+#[test]
+fn test_bytes_into_vec_promotable_even() {
+    let vec = vec![33u8; 1024];
+
+    // Test cases where kind == KIND_VEC
+    let b1 = Bytes::from(vec.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 1
+    let b1 = Bytes::from(vec.clone());
+    drop(b1.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 2
+    let b1 = Bytes::from(vec.clone());
+    let b2 = b1.clone();
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+    assert_eq!(Vec::from(b2), vec);
+
+    // Test cases where offset != 0
+    let mut b1 = Bytes::from(vec.clone());
+    let b2 = b1.split_off(20);
+
+    assert_eq!(Vec::from(b2), vec[20..]);
+    assert_eq!(Vec::from(b1), vec[..20]);
+}

+ 30 - 2
zeroidc/vendor/bytes/tests/test_bytes_odd_alloc.rs

@@ -24,8 +24,7 @@ unsafe impl GlobalAlloc for Odd {
             };
             let ptr = System.alloc(new_layout);
             if !ptr.is_null() {
-                let ptr = ptr.offset(1);
-                ptr
+                ptr.offset(1)
             } else {
                 ptr
             }
@@ -67,3 +66,32 @@ fn test_bytes_clone_drop() {
     let b1 = Bytes::from(vec);
     let _b2 = b1.clone();
 }
+
+#[test]
+fn test_bytes_into_vec() {
+    let vec = vec![33u8; 1024];
+
+    // Test cases where kind == KIND_VEC
+    let b1 = Bytes::from(vec.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 1
+    let b1 = Bytes::from(vec.clone());
+    drop(b1.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 2
+    let b1 = Bytes::from(vec.clone());
+    let b2 = b1.clone();
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+    assert_eq!(Vec::from(b2), vec);
+
+    // Test cases where offset != 0
+    let mut b1 = Bytes::from(vec.clone());
+    let b2 = b1.split_off(20);
+
+    assert_eq!(Vec::from(b2), vec[20..]);
+    assert_eq!(Vec::from(b1), vec[..20]);
+}

+ 103 - 39
zeroidc/vendor/bytes/tests/test_bytes_vec_alloc.rs

@@ -1,61 +1,87 @@
 use std::alloc::{GlobalAlloc, Layout, System};
-use std::{mem, ptr};
+use std::ptr::null_mut;
+use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 
 use bytes::{Buf, Bytes};
 
 #[global_allocator]
-static LEDGER: Ledger = Ledger;
+static LEDGER: Ledger = Ledger::new();
 
-struct Ledger;
+const LEDGER_LENGTH: usize = 2048;
 
-const USIZE_SIZE: usize = mem::size_of::<usize>();
+struct Ledger {
+    alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
+}
 
-unsafe impl GlobalAlloc for Ledger {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        if layout.align() == 1 && layout.size() > 0 {
-            // Allocate extra space to stash a record of
-            // how much space there was.
-            let orig_size = layout.size();
-            let size = orig_size + USIZE_SIZE;
-            let new_layout = match Layout::from_size_align(size, 1) {
-                Ok(layout) => layout,
-                Err(_err) => return ptr::null_mut(),
-            };
-            let ptr = System.alloc(new_layout);
-            if !ptr.is_null() {
-                (ptr as *mut usize).write(orig_size);
-                let ptr = ptr.offset(USIZE_SIZE as isize);
-                ptr
-            } else {
-                ptr
+impl Ledger {
+    const fn new() -> Self {
+        const ELEM: (AtomicPtr<u8>, AtomicUsize) =
+            (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
+        let alloc_table = [ELEM; LEDGER_LENGTH];
+
+        Self { alloc_table }
+    }
+
+    /// Iterate over our table until we find an open entry, then insert into said entry
+    fn insert(&self, ptr: *mut u8, size: usize) {
+        for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // SeqCst is good enough here, we don't care about perf, i just want to be correct!
+            if entry_ptr
+                .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
+                .is_ok()
+            {
+                entry_size.store(size, Ordering::SeqCst);
+                break;
             }
-        } else {
-            System.alloc(layout)
         }
     }
 
-    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        if layout.align() == 1 && layout.size() > 0 {
-            let off_ptr = (ptr as *mut usize).offset(-1);
-            let orig_size = off_ptr.read();
-            if orig_size != layout.size() {
-                panic!(
-                    "bad dealloc: alloc size was {}, dealloc size is {}",
-                    orig_size,
-                    layout.size()
-                );
+    fn remove(&self, ptr: *mut u8) -> usize {
+        for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // set the value to be something that will never try and be deallocated, so that we
+            // don't have any chance of a race condition
+            //
+            // dont worry, LEDGER_LENGTH is really long to compensate for us not reclaiming space
+            if entry_ptr
+                .compare_exchange(
+                    ptr,
+                    invalid_ptr(usize::MAX),
+                    Ordering::SeqCst,
+                    Ordering::SeqCst,
+                )
+                .is_ok()
+            {
+                return entry_size.load(Ordering::SeqCst);
             }
+        }
+
+        panic!("Couldn't find a matching entry for {:x?}", ptr);
+    }
+}
+
+unsafe impl GlobalAlloc for Ledger {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let size = layout.size();
+        let ptr = System.alloc(layout);
+        self.insert(ptr, size);
+        ptr
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let orig_size = self.remove(ptr);
 
-            let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
-                Ok(layout) => layout,
-                Err(_err) => std::process::abort(),
-            };
-            System.dealloc(off_ptr as *mut u8, new_layout);
+        if orig_size != layout.size() {
+            panic!(
+                "bad dealloc: alloc size was {}, dealloc size is {}",
+                orig_size,
+                layout.size()
+            );
         } else {
             System.dealloc(ptr, layout);
         }
     }
 }
+
 #[test]
 fn test_bytes_advance() {
     let mut bytes = Bytes::from(vec![10, 20, 30]);
@@ -77,3 +103,41 @@ fn test_bytes_truncate_and_advance() {
     bytes.advance(1);
     drop(bytes);
 }
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+    let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
+    debug_assert_eq!(ptr as usize, addr);
+    ptr.cast::<T>()
+}
+
+#[test]
+fn test_bytes_into_vec() {
+    let vec = vec![33u8; 1024];
+
+    // Test cases where kind == KIND_VEC
+    let b1 = Bytes::from(vec.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 1
+    let b1 = Bytes::from(vec.clone());
+    drop(b1.clone());
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where kind == KIND_ARC, ref_cnt == 2
+    let b1 = Bytes::from(vec.clone());
+    let b2 = b1.clone();
+    assert_eq!(Vec::from(b1), vec);
+
+    // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+    assert_eq!(Vec::from(b2), vec);
+
+    // Test cases where offset != 0
+    let mut b1 = Bytes::from(vec.clone());
+    let b2 = b1.split_off(20);
+
+    assert_eq!(Vec::from(b2), vec[20..]);
+    assert_eq!(Vec::from(b1), vec[..20]);
+}

+ 22 - 0
zeroidc/vendor/bytes/tests/test_chain.rs

@@ -133,6 +133,28 @@ fn vectored_read() {
     }
 }
 
+#[test]
+fn chain_growing_buffer() {
+    let mut buff = [' ' as u8; 10];
+    let mut vec = b"wassup".to_vec();
+
+    let mut chained = (&mut buff[..]).chain_mut(&mut vec).chain_mut(Vec::new()); // Required for potential overflow because remaining_mut for Vec is isize::MAX - vec.len(), but for chain_mut is usize::MAX
+
+    chained.put_slice(b"hey there123123");
+
+    assert_eq!(&buff, b"hey there1");
+    assert_eq!(&vec, b"wassup23123");
+}
+
+#[test]
+fn chain_overflow_remaining_mut() {
+    let mut chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());
+
+    assert_eq!(chained.remaining_mut(), usize::MAX);
+    chained.put_slice(&[0; 256]);
+    assert_eq!(chained.remaining_mut(), usize::MAX);
+}
+
 #[test]
 fn chain_get_bytes() {
     let mut ab = Bytes::copy_from_slice(b"ab");

文件差异内容过多而无法显示
+ 0 - 0
zeroidc/vendor/darling/.cargo-checksum.json


+ 194 - 0
zeroidc/vendor/darling/CHANGELOG.md

@@ -0,0 +1,194 @@
+# Changelog
+
+## v0.13.4 (April 6, 2022)
+
+- Impl `FromMeta` for `syn::Visibility` [#173](https://github.com/TedDriggs/darling/pull/173)
+
+## v0.13.3 (April 5, 2022)
+
+- Add `error::Accumulator` for dealing with multiple errors [#164](https://github.com/TedDriggs/darling/pull/164)
+- Impl `FromMeta` for `syn::Type` and its variants [#172](https://github.com/TedDriggs/darling/pull/172)
+
+## v0.13.2 (March 30, 2022)
+
+- Impl `FromMeta` for `syn::ExprPath` [#169](https://github.com/TedDriggs/darling/issues/169)
+
+## v0.13.1 (December 7, 2021)
+
+- Add `FromAttributes` trait and macro [#151](https://github.com/TedDriggs/darling/issues/151)
+
+## v0.13.0 (May 20, 2021)
+
+- Update darling to 2018 edition [#129](https://github.com/TedDriggs/darling/pull/129)
+- Error on duplicate fields in `#[darling(...)]` attributes [#130](https://github.com/TedDriggs/darling/pull/130)
+- Impl `Copy` for `SpannedValue<T: Copy>`
+- Add `SpannedValue::map_ref`
+
+## v0.13.0-beta (April 20, 2021)
+
+- Update darling to 2018 edition [#129](https://github.com/TedDriggs/darling/pull/129)
+- Error on duplicate fields in `#[darling(...)]` attributes [#130](https://github.com/TedDriggs/darling/pull/130)
+
+## v0.12.4 (April 20, 2021)
+
+- Add `and_then` to derive macros for `darling`
+
+## v0.12.3 (April 8, 2021)
+
+- Fix `FromMeta` impl for `char` not to panic [#126](https://github.com/TedDriggs/darling/pull/126)
+
+## v0.12.2 (February 23, 2021)
+
+- Impl `FromMeta` for `HashMap<Ident, V>` and `HashMap<Path, V>`
+
+## v0.12.1 (February 22, 2021)
+
+- Impl `FromMeta` for `syn::ExprArray` [#122](https://github.com/TedDriggs/darling/pull/122)
+- Remove use of `unreachable` from `darling::ast::Data` [#123](https://github.com/TedDriggs/darling/pull/123)
+- Add `darling::ast::Data::try_empty_from` to avoid panics when trying to read a union body [#123](https://github.com/TedDriggs/darling/pull/123)
+
+## v0.12.0 (January 5, 2021)
+
+- POSSIBLY BREAKING: Derived impls of `FromDeriveInput`, `FromField`, `FromVariant`, and `FromTypeParam` will now error when encountering an attribute `darling` has been asked to parse that isn't a supported shape.
+  Any crates using `darling` that relied on those attributes being silently ignored could see new errors reported in their dependent crates. [#113](https://github.com/TedDriggs/darling/pull/113)
+- Impl `syn::spanned::Spanned` for `darling::util::SpannedValue` [#113](https://github.com/TedDriggs/darling/pull/113)
+- Add `darling::util::parse_attribute_to_meta_list` to provide useful errors during attribute parsing [#113](https://github.com/TedDriggs/darling/pull/113)
+- Add `impl From<syn::Error> for Error` to losslessly propagate `syn` errors [#116](https://github.com/TedDriggs/darling/pull/116)
+
+## v0.11.0 (December 14, 2020)
+
+- Bump minor version due to unexpected breaking change [#107](https://github.com/TedDriggs/darling/issues/107)
+
+## v0.10.3 (December 10, 2020)
+
+- Add `discriminant` magic field when deriving `FromVariant` [#105](https://github.com/TedDriggs/darling/pull/105)
+
+## v0.10.2 (October 30, 2019)
+
+- Bump syn dependency to 1.0.1 [#83](https://github.com/TedDriggs/darling/pull/83)
+
+## v0.10.1 (September 25, 2019)
+
+- Fix test compilation errors [#81](https://github.com/TedDriggs/darling/pull/81)
+
+## v0.10.0 (August 15, 2019)
+
+- Bump syn and quote to 1.0 [#79](https://github.com/TedDriggs/darling/pull/79)
+- Increase rust version to 1.31
+
+## v0.9.0 (March 20, 2019)
+
+- Enable "did you mean" suggestions by default
+- Make `darling_core::{codegen, options}` private [#58](https://github.com/TedDriggs/darling/issues/58)
+- Fix `Override::as_mut`: [#66](https://github.com/TedDriggs/darling/issues/66)
+
+## v0.8.6 (March 18, 2019)
+
+- Added "did you mean" suggestions for unknown fields behind the `suggestions` flag [#60](https://github.com/TedDriggs/darling/issues/60)
+- Added `Error::unknown_field_with_alts` to support the suggestion use-case.
+- Added `ast::Fields::len` and `ast::Fields::is_empty` methods.
+
+## v0.8.5 (February 4, 2019)
+
+- Accept unquoted positive numeric literals [#52](https://github.com/TedDriggs/darling/issues/52)
+- Add `FromMeta` to the `syn::Lit` enum and its variants
+- Improve error message for unexpected literal formats to not say "other"
+
+## v0.8.4 (February 4, 2019)
+
+- Use `syn::Error` to provide precise errors before `proc_macro::Diagnostic` is available
+- Add `diagnostics` feature flag to toggle between stable and unstable error backends
+- Attach error information in more contexts
+- Add `allow_unknown_fields` to support parsing the same attribute multiple times for different macros [#51](https://github.com/TedDriggs/darling/issues/51)
+- Proc-macro authors will now see better errors in `darling` attributes
+
+## v0.8.3 (January 21, 2019)
+
+- Attach spans to errors in generated trait impls [#37](https://github.com/TedDriggs/darling/issues/37)
+- Attach spans to errors for types with provided bespoke implementations
+- Deprecate `set_span` from 0.8.2, as spans should never be broadened after being initially set
+
+## v0.8.2 (January 17, 2019)
+
+- Add spans to errors to make quality warnings and errors easy in darling. This is blocked on diagnostics stabilizing.
+- Add `darling::util::SpannedValue` so proc-macro authors can remember position information alongside parsed values.
+
+## v0.8.0
+
+- Update dependency on `syn` to 0.15 [#44](https://github.com/TedDriggs/darling/pull/44). Thanks to @hcpl
+
+## v0.7.0 (July 24, 2018)
+
+- Update dependencies on `syn` and `proc-macro2`
+- Add `util::IdentString`, which acts as an Ident or its string equivalent
+
+## v0.6.3 (May 22, 2018)
+
+- Add support for `Uses*` traits in where predicates
+
+## v0.6.2 (May 22, 2018)
+
+- Add `usage` module for tracking type param and lifetime usage in generic declarations
+  - Add `UsesTypeParams` and `CollectsTypeParams` traits [#37](https://github.com/TedDriggs/darling/issues/37)
+  - Add `UsesLifetimes` and `CollectLifetimes` traits [#41](https://github.com/TedDriggs/darling/pull/41)
+- Don't add `FromMeta` bounds to type parameters only used by skipped fields [#40](https://github.com/TedDriggs/darling/pull/40)
+
+## v0.6.1 (May 17, 2018)
+
+- Fix an issue where the `syn` update broke shape validation [#36](https://github.com/TedDriggs/darling/issues/36)
+
+## v0.6.0 (May 15, 2018)
+
+### Breaking Changes
+
+- Renamed `FromMetaItem` to `FromMeta`, and renamed `from_meta_item` method to `from_meta`
+- Added dedicated `derive(FromMetaItem)` which panics and redirects users to `FromMeta`
+
+## v0.5.0 (May 10, 2018)
+
+- Add `ast::Generics` and `ast::GenericParam` to work with generics in a manner similar to `ast::Data`
+- Add `ast::GenericParamExt` to support alternate representations of generic parameters
+- Add `util::WithOriginal` to get a parsed representation and syn's own struct for a syntax block
+- Add `FromGenerics` and `FromGenericParam` traits (without derive support)
+- Change generated code for `generics` magic field to invoke `FromGenerics` trait during parsing
+- Add `FromTypeParam` trait [#30](https://github.com/TedDriggs/darling/pull/30). Thanks to @upsuper
+
+## v0.4.0 (April 5, 2018)
+
+- Update dependencies on `proc-macro`, `quote`, and `syn` [#26](https://github.com/TedDriggs/darling/pull/26). Thanks to @hcpl
+
+## v0.3.3 (April 2, 2018)
+
+**YANKED**
+
+## v0.3.2 (March 13, 2018)
+
+- Derive `Default` on `darling::Ignored` (fixes [#25](https://github.com/TedDriggs/darling/issues/25)).
+
+## v0.3.1 (March 7, 2018)
+
+- Support proc-macro2/nightly [#24](https://github.com/TedDriggs/darling/pull/24). Thanks to @kdy1
+
+## v0.3.0 (January 26, 2018)
+
+### Breaking Changes
+
+- Update `syn` to 0.12 [#20](https://github.com/TedDriggs/darling/pull/20). Thanks to @Eijebong
+- Update `quote` to 0.4 [#20](https://github.com/TedDriggs/darling/pull/20). Thanks to @Eijebong
+- Rename magic field `body` in derived `FromDeriveInput` structs to `data` to stay in sync with `syn`
+- Rename magic field `data` in derived `FromVariant` structs to `fields` to stay in sync with `syn`
+
+## v0.2.2 (December 5, 2017)
+
+- Update `lazy_static` to 1.0 [#16](https://github.com/TedDriggs/darling/pull/16). Thanks to @Eijebong
+
+## v0.2.1 (November 28, 2017)
+
+- Add `impl FromMetaItem` for integer types [#15](https://github.com/TedDriggs/darling/pull/15)
+
+## v0.2.0 (June 18, 2017)
+
+- Added support for returning multiple errors from parsing [#5](https://github.com/TedDriggs/darling/pull/5)
+- Derived impls no longer return on first error [#5](https://github.com/TedDriggs/darling/pull/5)
+- Removed default types for `V` and `F` from `ast::Body`
+- Enum variants are automatically converted to snake_case [#12](https://github.com/TedDriggs/darling/pull/12)

+ 92 - 0
zeroidc/vendor/darling/Cargo.lock

@@ -0,0 +1,92 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "darling"
+version = "0.13.4"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "syn"
+version = "1.0.74"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1873d832550d4588c3dbc20f01361ab00bfe741048f71e3fecf145a7cc18b29c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

+ 42 - 0
zeroidc/vendor/darling/Cargo.toml

@@ -0,0 +1,42 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "darling"
+version = "0.13.4"
+authors = ["Ted Driggs <[email protected]>"]
+exclude = ["/.travis.yml", "/publish.sh", "/.github/**"]
+description = "A proc-macro library for reading attributes into structs when\nimplementing custom derives.\n"
+documentation = "https://docs.rs/darling/0.13.4"
+readme = "README.md"
+license = "MIT"
+repository = "https://github.com/TedDriggs/darling"
+[dependencies.darling_core]
+version = "=0.13.4"
+
+[dependencies.darling_macro]
+version = "=0.13.4"
+[dev-dependencies.proc-macro2]
+version = "1.0.26"
+
+[dev-dependencies.quote]
+version = "1.0.9"
+
+[dev-dependencies.syn]
+version = "1.0.69"
+
+[features]
+default = ["suggestions"]
+diagnostics = ["darling_core/diagnostics"]
+suggestions = ["darling_core/suggestions"]
+[badges.maintenance]
+status = "actively-developed"

+ 21 - 0
zeroidc/vendor/darling/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Ted Driggs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 114 - 0
zeroidc/vendor/darling/README.md

@@ -0,0 +1,114 @@
+Darling
+=======
+
+[![Build Status](https://github.com/TedDriggs/darling/workflows/CI/badge.svg)](https://github.com/TedDriggs/darling/actions)
+[![Latest Version](https://img.shields.io/crates/v/darling.svg)](https://crates.io/crates/darling)
+[![Rustc Version 1.31+](https://img.shields.io/badge/rustc-1.31+-lightgray.svg)](https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html)
+
+`darling` is a crate for proc macro authors, which enables parsing attributes into structs. It is heavily inspired by `serde` both in its internals and in its API.
+
+# Benefits
+* Easy and declarative parsing of macro input - make your proc-macros highly controllable with minimal time investment.
+* Great validation and errors, no work required. When users of your proc-macro make a mistake, `darling` makes sure they get error markers at the right place in their source, and provides "did you mean" suggestions for misspelled fields.
+
+# Usage
+`darling` provides a set of traits which can be derived or manually implemented.
+
+1. `FromMeta` is used to extract values from a meta-item in an attribute. Implementations are likely reusable for many libraries, much like `FromStr` or `serde::Deserialize`. Trait implementations are provided for primitives, some std types, and some `syn` types.
+2. `FromDeriveInput` is implemented or derived by each proc-macro crate which depends on `darling`. This is the root for input parsing; it gets access to the identity, generics, and visibility of the target type, and can specify which attribute names should be parsed or forwarded from the input AST.
+3. `FromField` is implemented or derived by each proc-macro crate which depends on `darling`. Structs deriving this trait will get access to the identity (if it exists), type, and visibility of the field.
+4. `FromVariant` is implemented or derived by each proc-macro crate which depends on `darling`. Structs deriving this trait will get access to the identity and contents of the variant, which can be transformed the same as any other `darling` input.
+5. `FromAttributes` is a lower-level version of the more-specific `FromDeriveInput`, `FromField`, and `FromVariant` traits. Structs deriving this trait get a meta-item extractor and error collection which works for any syntax element, including traits, trait items, and functions. This is useful for non-derive proc macros.
+
+## Additional Modules
+* `darling::ast` provides generic types for representing the AST.
+* `darling::usage` provides traits and functions for determining where type parameters and lifetimes are used in a struct or enum.
+* `darling::util` provides helper types with special `FromMeta` implementations, such as `IdentList`.
+
+# Example
+
+```rust,ignore
+#[macro_use]
+extern crate darling;
+extern crate syn;
+
+#[derive(Default, FromMeta)]
+#[darling(default)]
+pub struct Lorem {
+    #[darling(rename = "sit")]
+    ipsum: bool,
+    dolor: Option<String>,
+}
+
+#[derive(FromDeriveInput)]
+#[darling(attributes(my_crate), forward_attrs(allow, doc, cfg))]
+pub struct MyTraitOpts {
+    ident: syn::Ident,
+    attrs: Vec<syn::Attribute>,
+    lorem: Lorem,
+}
+```
+
+The above code will then be able to parse this input:
+
+```rust,ignore
+/// A doc comment which will be available in `MyTraitOpts::attrs`.
+#[derive(MyTrait)]
+#[my_crate(lorem(dolor = "Hello", sit))]
+pub struct ConsumingType;
+```
+
+# Attribute Macros
+Non-derive attribute macros are supported.
+To parse arguments for attribute macros, derive `FromMeta` on the argument receiver type, then pass `&syn::AttributeArgs` to the `from_list` method.
+This will produce a normal `darling::Result<T>` that can be used the same as a result from parsing a `DeriveInput`.
+
+## Macro Code
+```rust,ignore
+use darling::FromMeta;
+use syn::{AttributeArgs, ItemFn};
+use proc_macro::TokenStream;
+
+#[derive(Debug, FromMeta)]
+pub struct MacroArgs {
+    #[darling(default)]
+    timeout_ms: Option<u16>,
+    path: String,
+}
+
+#[proc_macro_attribute]
+fn your_attr(args: TokenStream, input: TokenStream) -> TokenStream {
+    let attr_args = parse_macro_input!(args as AttributeArgs);
+    let _input = parse_macro_input!(input as ItemFn);
+
+    let _args = match MacroArgs::from_list(&attr_args) {
+        Ok(v) => v,
+        Err(e) => { return TokenStream::from(e.write_errors()); }
+    };
+
+    // do things with `args`
+    unimplemented!()
+}
+```
+
+## Consuming Code
+```rust,ignore
+use your_crate::your_attr;
+
+#[your_attr(path = "hello", timeout_ms = 15)]
+fn do_stuff() {
+    println!("Hello");
+}
+```
+
+# Features
+Darling's features are built to work well for real-world projects.
+
+* **Defaults**: Supports struct- and field-level defaults, using the same path syntax as `serde`.
+* **Field Renaming**: Fields can have different names in usage vs. the backing code.
+* **Auto-populated fields**: Structs deriving `FromDeriveInput` and `FromField` can declare properties named `ident`, `vis`, `ty`, `attrs`, and `generics` to automatically get copies of the matching values from the input AST. `FromDeriveInput` additionally exposes `data` to get access to the body of the deriving type, and `FromVariant` exposes `fields`.
+* **Mapping function**: Use `#[darling(map="path")]` or `#[darling(and_then="path")]` to specify a function that runs on the result of parsing a meta-item field. This can change the return type, which enables you to parse to an intermediate form and convert that to the type you need in your struct.
+* **Skip fields**: Use `#[darling(skip)]` to mark a field that shouldn't be read from attribute meta-items.
+* **Multiple-occurrence fields**: Use `#[darling(multiple)]` on a `Vec` field to allow that field to appear multiple times in the meta-item. Each occurrence will be pushed into the `Vec`.
+* **Span access**: Use `darling::util::SpannedValue` in a struct to get access to that meta item's source code span. This can be used to emit warnings that point at a specific field from your proc macro. In addition, you can use `darling::Error::write_errors` to automatically get precise error location details in most cases.
+* **"Did you mean" suggestions**: Compile errors from derived darling trait impls include suggestions for misspelled fields.

+ 2 - 0
zeroidc/vendor/darling/clippy.toml

@@ -0,0 +1,2 @@
+msrv = "1.31.0"
+blacklisted-names = [] # we want to be able to use placeholder names in tests

+ 73 - 0
zeroidc/vendor/darling/examples/automatic_bounds.rs

@@ -0,0 +1,73 @@
+use darling::{FromDeriveInput, FromMeta};
+
+#[derive(FromMeta, PartialEq, Eq, Debug)]
+enum Volume {
+    Whisper,
+    Talk,
+    Shout,
+}
+
+/// A more complex example showing the ability to skip at a field or struct
+/// level while still tracking which type parameters need to be bounded.
+/// This can be seen by expanding this example using `cargo expand`.
+#[derive(FromMeta)]
+#[allow(dead_code)]
+enum Emphasis<T> {
+    Constant(Volume),
+    Variable(darling::util::PathList),
+    #[darling(skip)]
+    PerPhoneme(Option<T>),
+    Strided {
+        #[darling(skip)]
+        step: Vec<T>,
+        #[darling(multiple)]
+        volume: Vec<Volume>,
+    },
+}
+
+#[derive(FromDeriveInput)]
+#[darling(attributes(speak))]
+struct SpeakingOptions<T, U> {
+    max_volume: U,
+    #[darling(skip, default)]
+    additional_data: Vec<T>,
+}
+
+#[derive(Default)]
+struct Phoneme {
+    #[allow(dead_code)]
+    first: String,
+}
+
+// This is probably the holy grail for `darling`'s own internal use-case:
+// Auto-apply `Default` bound to skipped *field* types in `where` clause.
+impl<T, U> Default for SpeakingOptions<T, U>
+where
+    Vec<T>: Default,
+    U: Default,
+{
+    fn default() -> Self {
+        Self {
+            max_volume: Default::default(),
+            additional_data: Default::default(),
+        }
+    }
+}
+
+fn main() {
+    let derive_input = syn::parse_str(
+        r#"
+        #[derive(Speak)]
+        #[speak(max_volume = "shout")]
+        enum HtmlElement {
+            Div(String)
+        }
+    "#,
+    )
+    .unwrap();
+
+    let parsed: SpeakingOptions<Phoneme, Volume> =
+        FromDeriveInput::from_derive_input(&derive_input).unwrap();
+    assert_eq!(parsed.max_volume, Volume::Shout);
+    assert_eq!(parsed.additional_data.len(), 0);
+}

+ 174 - 0
zeroidc/vendor/darling/examples/consume_fields.rs

@@ -0,0 +1,174 @@
+// The use of fields in debug print commands does not count as "used",
+// which causes the fields to trigger an unwanted dead code warning.
+#![allow(dead_code)]
+
+//! This example shows how to do struct and field parsing using darling.
+
+use darling::{ast, FromDeriveInput, FromField, FromMeta};
+use proc_macro2::TokenStream;
+use quote::{quote, ToTokens};
+use syn::parse_str;
+
+/// A speaking volume. Deriving `FromMeta` will cause this to be usable
+/// as a string value for a meta-item key.
+#[derive(Debug, Clone, Copy, FromMeta)]
+#[darling(default)]
+enum Volume {
+    Normal,
+    Whisper,
+    Shout,
+}
+
+impl Default for Volume {
+    fn default() -> Self {
+        Volume::Normal
+    }
+}
+
+/// Support parsing from a full derive input. Unlike FromMeta, this isn't
+/// composable; each darling-dependent crate should have its own struct to handle
+/// when its trait is derived.
+#[derive(Debug, FromDeriveInput)]
+// This line says that we want to process all attributes declared with `my_trait`,
+// and that darling should panic if this receiver is given an enum.
+#[darling(attributes(my_trait), supports(struct_any))]
+struct MyInputReceiver {
+    /// The struct ident.
+    ident: syn::Ident,
+
+    /// The type's generics. You'll need these any time your trait is expected
+    /// to work with types that declare generics.
+    generics: syn::Generics,
+
+    /// Receives the body of the struct or enum. We don't care about
+    /// struct fields because we previously told darling we only accept structs.
+    data: ast::Data<(), MyFieldReceiver>,
+
+    /// The Input Receiver demands a volume, so use `Volume::Normal` if the
+    /// caller doesn't provide one.
+    #[darling(default)]
+    volume: Volume,
+}
+
+impl ToTokens for MyInputReceiver {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let MyInputReceiver {
+            ref ident,
+            ref generics,
+            ref data,
+            volume,
+        } = *self;
+
+        let (imp, ty, wher) = generics.split_for_impl();
+        let fields = data
+            .as_ref()
+            .take_struct()
+            .expect("Should never be enum")
+            .fields;
+
+        // Generate the format string which shows each field and its name
+        let fmt_string = fields
+            .iter()
+            .enumerate()
+            .map(|(i, f)| {
+                // We have to preformat the ident in this case so we can fall back
+                // to the field index for unnamed fields. It's not easy to read,
+                // unfortunately.
+                format!(
+                    "{} = {{}}",
+                    f.ident
+                        .as_ref()
+                        .map(|v| format!("{}", v))
+                        .unwrap_or_else(|| format!("{}", i))
+                )
+            })
+            .collect::<Vec<_>>()
+            .join(", ");
+
+        // Generate the actual values to fill the format string.
+        let field_list = fields
+            .into_iter()
+            .enumerate()
+            .map(|(i, f)| {
+                let field_volume = f.volume.unwrap_or(volume);
+
+                // This works with named or indexed fields, so we'll fall back to the index so we can
+                // write the output as a key-value pair.
+                let field_ident = f.ident
+                    .as_ref()
+                    .map(|v| quote!(#v))
+                    .unwrap_or_else(|| {
+                        let i = syn::Index::from(i);
+                        quote!(#i)
+                    });
+
+                match field_volume {
+                    Volume::Normal => quote!(self.#field_ident),
+                    Volume::Shout => {
+                        quote!(::std::string::ToString::to_string(&self.#field_ident).to_uppercase())
+                    }
+                    Volume::Whisper => {
+                        quote!(::std::string::ToString::to_string(&self.#field_ident).to_lowercase())
+                    }
+                }
+            })
+            .collect::<Vec<_>>();
+
+        tokens.extend(quote! {
+            impl #imp Speak for #ident #ty #wher {
+                fn speak(&self, writer: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+                    write!(writer, #fmt_string, #(#field_list),*)
+                }
+            }
+        });
+    }
+}
+
+#[derive(Debug, FromField)]
+#[darling(attributes(my_trait))]
+struct MyFieldReceiver {
+    /// Get the ident of the field. For fields in tuple or newtype structs or
+    /// enum bodies, this can be `None`.
+    ident: Option<syn::Ident>,
+
+    /// This magic field name pulls the type from the input.
+    ty: syn::Type,
+
+    /// We declare this as an `Option` so that during tokenization we can write
+    /// `field.volume.unwrap_or(derive_input.volume)` to facilitate field-level
+    /// overrides of struct-level settings.
+    #[darling(default)]
+    volume: Option<Volume>,
+}
+
+fn main() {
+    let input = r#"#[derive(MyTrait)]
+#[my_trait(volume = "shout")]
+pub struct Foo {
+    #[my_trait(volume = "whisper")]
+    bar: bool,
+
+    baz: i64,
+}"#;
+
+    let parsed = parse_str(input).unwrap();
+    let receiver = MyInputReceiver::from_derive_input(&parsed).unwrap();
+    let tokens = quote!(#receiver);
+
+    println!(
+        r#"
+INPUT:
+
+{}
+
+PARSED AS:
+
+{:?}
+
+EMITS:
+
+{}
+    "#,
+        input, receiver, tokens
+    );
+}

+ 85 - 0
zeroidc/vendor/darling/examples/fallible_read.rs

@@ -0,0 +1,85 @@
+//! This example demonstrates techniques for performing custom error handling
+//! in a derive-input receiver.
+//!
+//! 1. Using `darling::Result` as a carrier to preserve the error for later display
+//! 1. Using `Result<T, syn::Meta>` to attempt a recovery in imperative code
+//! 1. Using the `map` darling meta-item to post-process a field before returning
+//! 1. Using the `and_then` darling meta-item to post-process the receiver before returning
+
+use darling::{FromDeriveInput, FromMeta};
+use syn::parse_str;
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(my_trait), and_then = "MyInputReceiver::autocorrect")]
+pub struct MyInputReceiver {
+    /// This field must be present and a string or else parsing will panic.
+    #[darling(map = "MyInputReceiver::make_string_shouty")]
+    name: String,
+
+    /// If this field fails to parse, the struct can still be built; the field
+    /// will contain the error. The consuming struct can then decide if this
+    /// blocks code generation. If so, panic or fail in `and_then`.
+    frequency: darling::Result<i64>,
+
+    /// If this field fails to parse, the struct can still be built; the field
+    /// will contain an `Err` with the original `syn::Meta`. This can be used
+    /// for alternate parsing attempts before panicking.
+    amplitude: Result<u64, syn::Meta>,
+}
+
+impl MyInputReceiver {
+    /// This function will be called by `darling` _after_ it's finished parsing the
+    /// `name` field but before initializing `name` with the resulting value. It's
+    /// a good place for transforms that are easiest to express on already-built
+    /// types.
+    fn make_string_shouty(s: String) -> String {
+        s.to_uppercase()
+    }
+
+    /// This function will be called by `darling` _after_ it's finished parsing the
+    /// input but before returning to the caller. This is a good place to initialize
+    /// skipped fields or to perform corrections that don't lend themselves to being
+    /// done elsewhere.
+    fn autocorrect(self) -> darling::Result<Self> {
+        let Self {
+            name,
+            frequency,
+            amplitude,
+        } = self;
+
+        // Amplitude doesn't have a sign, so if we received a negative number then
+        // we'll go ahead and make it positive.
+        let amplitude = match amplitude {
+            Ok(amp) => amp,
+            Err(mi) => (i64::from_meta(&mi)?).abs() as u64,
+        };
+
+        Ok(Self {
+            name,
+            frequency,
+            amplitude: Ok(amplitude),
+        })
+    }
+}
+
+fn main() {
+    let input = r#"#[derive(MyTrait)]
+#[my_trait(name="Jon", amplitude = "-1", frequency = 1)]
+pub struct Foo;"#;
+
+    let parsed = parse_str(input).unwrap();
+    let receiver = MyInputReceiver::from_derive_input(&parsed).unwrap();
+
+    println!(
+        r#"
+INPUT:
+
+{}
+
+PARSED AS:
+
+{:?}
+    "#,
+        input, receiver
+    );
+}

+ 80 - 0
zeroidc/vendor/darling/examples/shorthand_or_long_field.rs

@@ -0,0 +1,80 @@
+//! Example showing potentially-nested meta item parsing with `darling::util::Override`.
+//!
+//! Based on https://stackoverflow.com/q/68046070/86381 by https://github.com/peterjoel
+
+// The use of fields in debug print commands does not count as "used",
+// which causes the fields to trigger an unwanted dead code warning.
+#![allow(dead_code)]
+
+use std::borrow::Cow;
+
+use darling::{util::Override, FromDeriveInput, FromMeta};
+use syn::{Ident, Path};
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(myderive))]
+struct MyDeriveInput {
+    ident: Ident,
+    /// We can infer the right "table" behavior for this derive, but we want the caller to be
+    /// explicit that they're expecting the inference behavior to avoid cluttering some hypothetical
+    /// database. Therefore this field is required, but can take word form or key-value form.
+    ///
+    /// To make this field optional, we could add `#[darling(default)]`, and we could additionally
+    /// wrap it in `Option` if the presence or absence of the word makes a difference.
+    table: Override<Table>,
+}
+
+impl MyDeriveInput {
+    fn table(&self) -> Cow<'_, Table> {
+        match &self.table {
+            Override::Explicit(value) => Cow::Borrowed(value),
+            Override::Inherit => Cow::Owned(Table {
+                name: self.ident.to_string(),
+                value: None,
+            }),
+        }
+    }
+}
+
+#[derive(Debug, Clone, FromMeta)]
+struct Table {
+    name: String,
+    #[darling(default)]
+    value: Option<Path>,
+}
+
+fn from_str(s: &str) -> darling::Result<MyDeriveInput> {
+    FromDeriveInput::from_derive_input(&syn::parse_str(s)?)
+}
+
+fn main() {
+    let missing = from_str(
+        r#"
+        #[derive(MyTrait)]
+        struct Foo(u64);
+    "#,
+    )
+    .unwrap_err();
+
+    let short_form = from_str(
+        r#"
+        #[derive(MyTrait)]
+        #[myderive(table)]
+        struct Foo(u64);
+    "#,
+    )
+    .unwrap();
+
+    let long_form = from_str(
+        r#"
+        #[derive(MyTrait)]
+        #[myderive(table(name = "Custom"))]
+        struct Foo(u64);
+    "#,
+    )
+    .unwrap();
+
+    println!("Error when missing: {}", missing);
+    println!("Short form: {:?}", short_form.table());
+    println!("Long form: {:?}", long_form.table());
+}

+ 61 - 0
zeroidc/vendor/darling/examples/supports_struct.rs

@@ -0,0 +1,61 @@
+// The use of fields in debug print commands does not count as "used",
+// which causes the fields to trigger an unwanted dead code warning.
+#![allow(dead_code)]
+
+use darling::{ast, util, FromDeriveInput, FromField};
+use syn::{Ident, Type};
+
+#[derive(Debug, FromField)]
+#[darling(attributes(lorem))]
+pub struct LoremField {
+    ident: Option<Ident>,
+    ty: Type,
+    #[darling(default)]
+    skip: bool,
+}
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(lorem), supports(struct_named))]
+pub struct Lorem {
+    ident: Ident,
+    data: ast::Data<util::Ignored, LoremField>,
+}
+
+fn main() {
+    let good_input = r#"#[derive(Lorem)]
+pub struct Foo {
+    #[lorem(skip)]
+    bar: bool,
+
+    baz: i64,
+}"#;
+
+    let bad_input = r#"#[derive(Lorem)]
+    pub struct BadFoo(String, u32);"#;
+
+    let parsed = syn::parse_str(good_input).unwrap();
+    let receiver = Lorem::from_derive_input(&parsed).unwrap();
+    let wrong_shape_parsed = syn::parse_str(bad_input).unwrap();
+    let wrong_shape = Lorem::from_derive_input(&wrong_shape_parsed).expect_err("Shape was wrong");
+
+    println!(
+        r#"
+INPUT:
+
+{}
+
+PARSED AS:
+
+{:?}
+
+BAD INPUT:
+
+{}
+
+PRODUCED ERROR:
+
+{}
+    "#,
+        good_input, receiver, bad_input, wrong_shape
+    );
+}

+ 107 - 0
zeroidc/vendor/darling/src/lib.rs

@@ -0,0 +1,107 @@
+//! # Darling
+//! Darling is a tool for declarative attribute parsing in proc macro implementations.
+//!
+//!
+//! ## Design
+//! Darling takes considerable design inspiration from [`serde`](https://serde.rs). A data structure that can be
+//! read from any attribute implements `FromMeta` (or has an implementation automatically
+//! generated using `derive`). Any crate can provide `FromMeta` implementations, even one not
+//! specifically geared towards proc-macro authors.
+//!
+//! Proc-macro crates should provide their own structs which implement or derive `FromDeriveInput`,
+//! `FromField`, `FromVariant`, `FromGenerics`, _et alia_ to gather settings relevant to their operation.
+//!
+//! ## Attributes
+//! There are a number of attributes that `darling` exposes to enable finer-grained control over the code
+//! it generates.
+//!
+//! * **Field renaming**: You can use `#[darling(rename="new_name")]` on a field to change the name Darling looks for.
+//!   You can also use `#[darling(rename_all="...")]` at the struct or enum level to apply a casing rule to all fields or variants.
+//! * **Map function**: You can use `#[darling(map="path::to::function")]` to run code on a field before its stored in the struct.
+//! * **Default values**: You can use `#[darling(default)]` at the type or field level to use that type's default value to fill
+//!   in values not specified by the caller.
+//! * **Skipped fields**: You can skip a variant or field using `#[darling(skip)]`. Fields marked with this will fall back to
+//!   `Default::default()` for their value, but you can override that with an explicit default or a value from the type-level default.
+//!
+//! ## Forwarded Fields
+//! All derivable traits except `FromMeta` support forwarding some fields from the input AST to the derived struct.
+//! These fields are matched up by identifier **before** `rename` attribute values are considered,
+//! allowing you to use their names for your own properties.
+//! The deriving struct is responsible for making sure the types of fields it chooses to declare are compatible with this table.
+//!
+//! A deriving struct is free to include or exclude any of the fields below.
+//!
+//! ### `FromDeriveInput`
+//! |Field name|Type|Meaning|
+//! |---|---|---|
+//! |`ident`|`syn::Ident`|The identifier of the passed-in type|
+//! |`vis`|`syn::Visibility`|The visibility of the passed-in type|
+//! |`generics`|`T: darling::FromGenerics`|The generics of the passed-in type. This can be `syn::Generics`, `darling::ast::Generics`, or any compatible type.|
+//! |`data`|`darling::ast::Data`|The body of the passed-in type|
+//! |`attrs`|`Vec<syn::Attribute>`|The forwarded attributes from the passed in type. These are controlled using the `forward_attrs` attribute.|
+//!
+//! ### `FromField`
+//! |Field name|Type|Meaning|
+//! |---|---|---|
+//! |`ident`|`Option<syn::Ident>`|The identifier of the passed-in field, or `None` for tuple fields|
+//! |`vis`|`syn::Visibility`|The visibility of the passed-in field|
+//! |`ty`|`syn::Type`|The type of the passed-in field|
+//! |`attrs`|`Vec<syn::Attribute>`|The forwarded attributes from the passed in field. These are controlled using the `forward_attrs` attribute.|
+//!
+//! ### `FromTypeParam`
+//! |Field name|Type|Meaning|
+//! |---|---|---|
+//! |`ident`|`syn::Ident`|The identifier of the passed-in type param|
+//! |`bounds`|`Vec<syn::TypeParamBound>`|The bounds applied to the type param|
+//! |`default`|`Option<syn::Type>`|The default type of the parameter, if one exists|
+//! |`attrs`|`Vec<syn::Attribute>`|The forwarded attributes from the passed in type param. These are controlled using the `forward_attrs` attribute.|
+//!
+//! ### `FromVariant`
+//! |Field name|Type|Meaning|
+//! |---|---|---|
+//! |`ident`|`syn::Ident`|The identifier of the passed-in variant|
+//! |`discriminant`|`Option<syn::Expr>`|For a variant such as `Example = 2`, the `2`|
+//! |`fields`|`Option<darling::ast::Fields<__>>`|The fields associated with the variant|
+//! |`attrs`|`Vec<syn::Attribute>`|The forwarded attributes from the passed in variant. These are controlled using the `forward_attrs` attribute.|
+
+extern crate core;
+
+#[allow(unused_imports)]
+#[macro_use]
+extern crate darling_macro;
+
+#[doc(hidden)]
+pub use darling_macro::*;
+
+#[doc(inline)]
+pub use darling_core::{
+    FromAttributes, FromDeriveInput, FromField, FromGenericParam, FromGenerics, FromMeta,
+    FromTypeParam, FromVariant,
+};
+
+#[doc(inline)]
+pub use darling_core::{Error, Result};
+
+#[doc(inline)]
+pub use darling_core::{ast, error, usage, util};
+
+// XXX exported so that `ExtractAttribute::extractor` can convert a path into tokens.
+// This is likely to change in the future, so only generated code should depend on this export.
+#[doc(hidden)]
+pub use darling_core::ToTokens;
+
+/// Core/std trait re-exports. This should help produce generated code which doesn't
+/// depend on `std` unnecessarily, and avoids problems caused by aliasing `std` or any
+/// of the referenced types.
+#[doc(hidden)]
+pub mod export {
+    pub use core::convert::From;
+    pub use core::default::Default;
+    pub use core::option::Option::{self, None, Some};
+    pub use core::result::Result::{self, Err, Ok};
+    pub use std::string::ToString;
+    pub use std::vec::Vec;
+}
+
+#[macro_use]
+mod macros_public;

+ 96 - 0
zeroidc/vendor/darling/src/macros_public.rs

@@ -0,0 +1,96 @@
+//! Macros that should be exported from both `darling_core` and `darling`.
+//! Note that these are **sym-linked** into the main code, and so cannot declare on items that are exported differently
+//! in `darling_core` vs. `darling`.
+
+/// Generator for `UsesTypeParam` impls that unions the used type parameters of the selected fields.
+///
+/// # Usage
+/// The macro takes the type implementing the trait as the first argument, then a comma-separated list of
+/// fields for the rest of its arguments.
+///
+/// The type of each passed-in field must implement `UsesTypeParams`, or the resulting code won't compile.
+///
+/// ```rust
+/// # extern crate syn;
+/// # use darling_core::uses_type_params;
+/// #
+/// struct MyField {
+///     ty: syn::Type,
+/// }
+///
+/// uses_type_params!(MyField, ty);
+///
+/// fn main() {
+///     // no test run
+/// }
+/// ```
+///
+/// `darling` cannot derive this trait automatically, as it doesn't know which information extracted from
+/// proc-macro input is meant to constitute "using" the type parameter, but crate consumers should
+/// implement it by hand or using the macro.
+#[macro_export]
+macro_rules! uses_type_params {
+    ($impl_type:ty, $accessor:ident) => {
+        impl $crate::usage::UsesTypeParams for $impl_type {
+            fn uses_type_params<'gen>(
+                &self,
+                options: &$crate::usage::Options,
+                type_set: &'gen $crate::usage::IdentSet
+            ) -> $crate::usage::IdentRefSet<'gen> {
+                self.$accessor.uses_type_params(options, type_set)
+            }
+        }
+    };
+    ($impl_type:ty, $first:ident, $($field:ident),+) => {
+        impl $crate::usage::UsesTypeParams for $impl_type {
+            fn uses_type_params<'gen>(
+                &self,
+                options: &$crate::usage::Options,
+                type_set: &'gen $crate::usage::IdentSet
+            ) -> $crate::usage::IdentRefSet<'gen> {
+                let mut hits = self.$first.uses_type_params(options, type_set);
+                $(
+                    hits.extend(self.$field.uses_type_params(options, type_set));
+                )*
+                hits
+            }
+        }
+    };
+}
+
+/// Generator for `UsesLifetimes` impls that unions the used lifetimes of the selected fields.
+///
+/// # Usage
+/// The macro takes the type implementing the trait as the first argument, then a comma-separated list of
+/// fields for the rest of its arguments.
+///
+/// The type of each passed-in field must implement `UsesLifetimes`, or the resulting code won't compile.
+#[macro_export]
+macro_rules! uses_lifetimes {
+    ($impl_type:ty, $accessor:ident) => {
+        impl $crate::usage::UsesLifetimes for $impl_type {
+            fn uses_lifetimes<'gen>(
+                &self,
+                options: &$crate::usage::Options,
+                type_set: &'gen $crate::usage::LifetimeSet
+            ) -> $crate::usage::LifetimeRefSet<'gen> {
+                self.$accessor.uses_lifetimes(options, type_set)
+            }
+        }
+    };
+    ($impl_type:ty, $first:ident, $($field:ident),+) => {
+        impl $crate::usage::UsesLifetimes for $impl_type {
+            fn uses_lifetimes<'gen>(
+                &self,
+                options: &$crate::usage::Options,
+                type_set: &'gen $crate::usage::LifetimeSet
+            ) -> $crate::usage::LifetimeRefSet<'gen> {
+                let mut hits = self.$first.uses_lifetimes(options, type_set);
+                $(
+                    hits.extend(self.$field.uses_lifetimes(options, type_set));
+                )*
+                hits
+            }
+        }
+    };
+}

+ 102 - 0
zeroidc/vendor/darling/tests/accrue_errors.rs

@@ -0,0 +1,102 @@
+#![allow(dead_code)]
+//! These tests verify that multiple errors will be collected up from throughout
+//! the parsing process and returned correctly to the caller.
+
+use darling::{ast, FromDeriveInput, FromField, FromMeta};
+use syn::parse_quote;
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(accrue))]
+struct Lorem {
+    ipsum: String,
+    dolor: Dolor,
+    data: ast::Data<(), LoremField>,
+}
+
+#[derive(Debug, FromMeta)]
+struct Dolor {
+    sit: bool,
+}
+
+#[derive(Debug, FromField)]
+#[darling(attributes(accrue))]
+struct LoremField {
+    ident: Option<syn::Ident>,
+    aliased_as: syn::Ident,
+}
+
+#[test]
+fn bad_type_and_missing_fields() {
+    let input = parse_quote! {
+        #[accrue(ipsum = true, dolor(amet = "Hi"))]
+        pub struct NonConforming {
+            foo: ()
+        }
+    };
+
+    let s_result: ::darling::Error = Lorem::from_derive_input(&input).unwrap_err();
+    let err = s_result.flatten();
+    println!("{}", err);
+    assert_eq!(3, err.len());
+}
+
+#[test]
+fn body_only_issues() {
+    let input = parse_quote! {
+        #[accrue(ipsum = "Hello", dolor(sit))]
+        pub struct NonConforming {
+            foo: (),
+            bar: bool,
+        }
+    };
+
+    let s_err = Lorem::from_derive_input(&input).unwrap_err();
+    println!("{:?}", s_err);
+    assert_eq!(2, s_err.len());
+}
+
+#[derive(Debug, FromMeta)]
+enum Week {
+    Monday,
+    Tuesday { morning: bool, afternoon: String },
+    Wednesday(Dolor),
+}
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(accrue))]
+struct Month {
+    schedule: Week,
+}
+
+#[test]
+fn error_in_enum_fields() {
+    let input = parse_quote! {
+        #[accrue(schedule(tuesday(morning = "yes")))]
+        pub struct NonConforming {
+            foo: (),
+            bar: bool,
+        }
+    };
+
+    let s_err = Month::from_derive_input(&input).unwrap_err();
+    assert_eq!(2, s_err.len());
+    let err = s_err.flatten();
+    // TODO add tests to check location path is correct
+    println!("{}", err);
+}
+
+#[test]
+fn error_in_newtype_variant() {
+    let input = parse_quote! {
+        #[accrue(schedule(wednesday(sit = "yes")))]
+        pub struct NonConforming {
+            foo: (),
+            bar: bool,
+        }
+    };
+
+    let s_err = Month::from_derive_input(&input).unwrap_err();
+    assert_eq!(1, s_err.len());
+    println!("{}", s_err);
+    println!("{}", s_err.flatten());
+}

+ 42 - 0
zeroidc/vendor/darling/tests/computed_bound.rs

@@ -0,0 +1,42 @@
+use darling::{FromDeriveInput, FromMeta};
+
+fn parse<T: FromDeriveInput>(src: &str) -> T {
+    let ast = syn::parse_str(src).unwrap();
+    FromDeriveInput::from_derive_input(&ast).unwrap()
+}
+
+#[derive(FromMeta, PartialEq, Eq, Debug)]
+enum Volume {
+    Whisper,
+    Talk,
+    Shout,
+}
+
+#[derive(FromDeriveInput)]
+#[darling(attributes(speak))]
+struct SpeakingOptions<T: Default, U> {
+    max_volume: U,
+    #[darling(skip)]
+    #[allow(dead_code)]
+    additional_data: T,
+}
+
+#[derive(Default)]
+struct Phoneme {
+    #[allow(dead_code)]
+    first: String,
+}
+
+#[test]
+fn skipped_field() {
+    let parsed: SpeakingOptions<Phoneme, Volume> = parse(
+        r#"
+        #[derive(Speak)]
+        #[speak(max_volume = "shout")]
+        enum HtmlElement {
+            Div(String)
+        }
+    "#,
+    );
+    assert_eq!(parsed.max_volume, Volume::Shout);
+}

+ 25 - 0
zeroidc/vendor/darling/tests/custom_bound.rs

@@ -0,0 +1,25 @@
+#![allow(dead_code)]
+
+use std::ops::Add;
+
+use darling::{FromDeriveInput, FromMeta};
+
+#[derive(Debug, Clone, FromMeta)]
+#[darling(bound = "T: FromMeta + Add")]
+struct Wrapper<T>(pub T);
+
+impl<T: Add> Add for Wrapper<T> {
+    type Output = Wrapper<<T as Add>::Output>;
+    fn add(self, rhs: Self) -> Wrapper<<T as Add>::Output> {
+        Wrapper(self.0 + rhs.0)
+    }
+}
+
+#[derive(Debug, FromDeriveInput)]
+#[darling(attributes(hello), bound = "Wrapper<T>: Add, T: FromMeta")]
+struct Foo<T> {
+    lorem: Wrapper<T>,
+}
+
+#[test]
+fn expansion() {}

+ 123 - 0
zeroidc/vendor/darling/tests/defaults.rs

@@ -0,0 +1,123 @@
+use darling::FromDeriveInput;
+use syn::parse_quote;
+
+mod foo {
+    pub mod bar {
+        pub fn init() -> String {
+            String::from("hello")
+        }
+    }
+}
+
+#[derive(FromDeriveInput)]
+#[darling(attributes(speak))]
+pub struct SpeakerOpts {
+    #[darling(default = "foo::bar::init")]
+    first_word: String,
+}
+
+#[test]
+fn path_default() {
+    let speaker: SpeakerOpts = FromDeriveInput::from_derive_input(&parse_quote! {
+        struct Foo;
+    })
+    .expect("Unit struct with no attrs should parse");
+
+    assert_eq!(speaker.first_word, "hello");
+}
+
+/// Tests in this module capture the somewhat-confusing behavior observed when defaults
+/// are set at both the field and container level.
+///
+/// The general rule is that more-specific declarations preempt less-specific ones; this is
+/// unsurprising and allows for granular control over what happens when parsing an AST.
+mod stacked_defaults {
+    use darling::{FromDeriveInput, FromMeta};
+    use syn::parse_quote;
+
+    fn jane() -> String {
+        "Jane".into()
+    }
+
+    #[derive(FromMeta)]
+    #[darling(default)]
+    struct PersonName {
+        #[darling(default = "jane")]
+        first: String,
+        #[darling(default)]
+        middle: String,
+        last: String,
+    }
+
+    impl Default for PersonName {
+        fn default() -> Self {
+            Self {
+                first: "John".into(),
+                middle: "T".into(),
+                last: "Doe".into(),
+            }
+        }
+    }
+
+    #[derive(FromDeriveInput)]
+    #[darling(attributes(person))]
+    struct Person {
+        #[darling(default)]
+        name: PersonName,
+        age: u8,
+    }
+
+    #[test]
+    fn name_first_only() {
+        let person = Person::from_derive_input(&parse_quote! {
+            #[person(name(first = "Bill"), age = 5)]
+            struct Foo;
+        })
+        .unwrap();
+
+        assert_eq!(person.name.first, "Bill");
+        assert_eq!(
+            person.name.middle, "",
+            "Explicit field-level default should preempt container-level default"
+        );
+        assert_eq!(
+            person.name.last, "Doe",
+            "Absence of a field-level default falls back to container-level default"
+        );
+    }
+
+    /// This is the most surprising case. The presence of `name()` means we invoke
+    /// `PersonName::from_list(&[])`. When that finishes parsing each of the zero nested
+    /// items it has received, it will then start filling in missing fields, using the
+    /// explicit field-level defaults for `first` and `middle`, while for `last` it will
+    /// use the `last` field from the container-level default.
+    #[test]
+    fn name_empty_list() {
+        let person = Person::from_derive_input(&parse_quote! {
+            #[person(name(), age = 5)]
+            struct Foo;
+        })
+        .unwrap();
+
+        assert_eq!(person.name.first, "Jane");
+        assert_eq!(person.name.middle, "");
+        assert_eq!(person.name.last, "Doe");
+    }
+
+    #[test]
+    fn no_name() {
+        let person = Person::from_derive_input(&parse_quote! {
+            #[person(age = 5)]
+            struct Foo;
+        })
+        .unwrap();
+
+        assert_eq!(person.age, 5);
+        assert_eq!(
+            person.name.first, "John",
+            "If `name` is not specified, `Person`'s field-level default should be used"
+        );
+        assert_eq!(person.name.middle, "T");
+        assert_eq!(person.name.last, "Doe");
+    }
+}

+ 90 - 0
zeroidc/vendor/darling/tests/enums_newtype.rs

@@ -0,0 +1,90 @@
+use darling::{FromDeriveInput, FromMeta};
+use syn::parse_quote;
+
+#[derive(Debug, Default, PartialEq, Eq, FromMeta)]
+#[darling(default)]
+pub struct Amet {
+    hello: bool,
+    world: String,
+}
+
+#[derive(Debug, PartialEq, Eq, FromMeta)]
+#[darling(rename_all = "snake_case")]
+pub enum Lorem {
+    Ipsum(bool),
+    Dolor(String),
+    Sit(Amet),
+}
+
+#[derive(Debug, PartialEq, Eq, FromDeriveInput)]
+#[darling(attributes(hello))]
+pub struct Holder {
+    lorem: Lorem,
+}
+
+impl PartialEq<Lorem> for Holder {
+    fn eq(&self, other: &Lorem) -> bool {
+        self.lorem == *other
+    }
+}
+
+#[test]
+fn bool_word() {
+    let di = parse_quote! {
+        #[hello(lorem(ipsum))]
+        pub struct Bar;
+    };
+
+    let pr = Holder::from_derive_input(&di).unwrap();
+    assert_eq!(pr, Lorem::Ipsum(true));
+}
+
+#[test]
+fn bool_literal() {
+    let di = parse_quote! {
+        #[hello(lorem(ipsum = false))]
+        pub struct Bar;
+    };
+
+    let pr = Holder::from_derive_input(&di).unwrap();
+    assert_eq!(pr, Lorem::Ipsum(false));
+}
+
+#[test]
+fn string_literal() {
+    let di = parse_quote! {
+        #[hello(lorem(dolor = "Hello"))]
+        pub struct Bar;
+    };
+
+    let pr = Holder::from_derive_input(&di).unwrap();
+    assert_eq!(pr, Lorem::Dolor("Hello".to_string()));
+}
+
+#[test]
+fn struct_nested() {
+    let di = parse_quote! {
+        #[hello(lorem(sit(world = "Hello", hello = false)))]
+        pub struct Bar;
+    };
+
+    let pr = Holder::from_derive_input(&di).unwrap();
+    assert_eq!(
+        pr,
+        Lorem::Sit(Amet {
+            hello: false,
+            world: "Hello".to_string(),
+        })
+    );
+}
+
+#[test]
+#[should_panic]
+fn format_mismatch() {
+    let di = parse_quote! {
+        #[hello(lorem(dolor(world = "Hello", hello = false)))]
+        pub struct Bar;
+    };
+
+    Holder::from_derive_input(&di).unwrap();
+}

+ 15 - 0
zeroidc/vendor/darling/tests/enums_struct.rs

@@ -0,0 +1,15 @@
+#![allow(dead_code)]
+
+//! Test expansion of enums which have struct variants.
+
+use darling::FromMeta;
+#[derive(Debug, FromMeta)]
+#[darling(rename_all = "snake_case")]
+enum Message {
+    Hello { user: String, silent: bool },
+    Ping,
+    Goodbye { user: String },
+}
+
+#[test]
+fn expansion() {}

部分文件因为文件数量过多而无法显示