
Merge branch 'TechEmpower:master' into main

Juanjo Aguililla, 1 year ago
commit 9f62ae3bff
92 changed files with 2638 additions and 2429 deletions
  1. +4 -3  frameworks/C++/userver/userver-bare.dockerfile
  2. +4 -3  frameworks/C++/userver/userver.dockerfile
  3. +1 -1  frameworks/C++/userver/userver_benchmark/CMakeLists.txt
  4. +13 -15  frameworks/C++/userver/userver_benchmark/bare/simple_connection.cpp
  5. +15 -15  frameworks/C++/userver/userver_benchmark/controllers/multiple_queries/handler.cpp
  6. +32 -25  frameworks/C++/userver/userver_benchmark/controllers/updates/handler.cpp
  7. +17 -1  frameworks/C++/userver/userver_benchmark/userver_techempower.cpp
  8. +2 -0  frameworks/C++/userver/userver_configs/static_config.yaml
  9. +5 -1  frameworks/C/h2o/h2o.dockerfile
  10. +3 -4  frameworks/Go/fiber/src/go.mod
  11. +9 -11  frameworks/Go/fiber/src/go.sum
  12. +1 -1  frameworks/Java/inverno/pom.xml
  13. +1 -1  frameworks/Java/jooby/jooby-jetty.dockerfile
  14. +1 -1  frameworks/Java/jooby/jooby-mvc.dockerfile
  15. +1 -1  frameworks/Java/jooby/jooby-netty.dockerfile
  16. +1 -1  frameworks/Java/jooby/jooby-pgclient.dockerfile
  17. +1 -1  frameworks/Java/jooby/jooby.dockerfile
  18. +12 -12  frameworks/Java/jooby/pom.xml
  19. +1 -1  frameworks/Java/netty/pom.xml
  20. +48 -0  frameworks/JavaScript/mesh/README.md
  21. +18 -0  frameworks/JavaScript/mesh/app.js
  22. +92 -0  frameworks/JavaScript/mesh/benchmark_config.json
  23. +16 -0  frameworks/JavaScript/mesh/drivers/mongodb.js
  24. +24 -0  frameworks/JavaScript/mesh/drivers/mysql.js
  25. +20 -0  frameworks/JavaScript/mesh/drivers/postgres.js
  26. +12 -0  frameworks/JavaScript/mesh/mesh-mongodb.dockerfile
  27. +12 -0  frameworks/JavaScript/mesh/mesh-mysql.dockerfile
  28. +12 -0  frameworks/JavaScript/mesh/mesh-postgres.dockerfile
  29. +11 -0  frameworks/JavaScript/mesh/mesh.dockerfile
  30. +369 -0  frameworks/JavaScript/mesh/package-lock.json
  31. +13 -0  frameworks/JavaScript/mesh/package.json
  32. +104 -0  frameworks/JavaScript/mesh/server.js
  33. +2 -2  frameworks/Kotlin/pellet/pellet.dockerfile
  34. +9 -13  frameworks/Kotlin/pellet/sample/build.gradle.kts
  35. +8 -9  frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/Benchmark.kt
  36. +1 -1  frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/FortuneDAO.kt
  37. +17 -9  frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/TFBRepository.kt
  38. +2 -2  frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/WorldDAO.kt
  39. +5 -1  frameworks/PHP/php/php-h2o.dockerfile
  40. +68 -13  frameworks/Python/async-worker/Pipfile.lock
  41. +1 -1  frameworks/Python/django/requirements.txt
  42. +20 -0  frameworks/Python/mrhttp/README.md
  43. +18 -0  frameworks/Python/mrhttp/app.py
  44. +23 -0  frameworks/Python/mrhttp/benchmark_config.json
  45. +15 -0  frameworks/Python/mrhttp/config.toml
  46. +13 -0  frameworks/Python/mrhttp/mrhttp.dockerfile
  47. +6 -0  frameworks/Python/mrhttp/requirements.txt
  48. +3 -1  frameworks/Ruby/h2o_mruby/h2o_mruby.dockerfile
  49. +1 -1  frameworks/Ruby/rails/Gemfile.lock
  50. +1 -1  frameworks/Rust/actix/rust-toolchain.toml
  51. +1 -1  frameworks/Rust/ohkami/.gitignore
  52. +235 -415  frameworks/Rust/ohkami/Cargo.lock
  53. +9 -7  frameworks/Rust/ohkami/Cargo.toml
  54. +35 -9  frameworks/Rust/ohkami/README.md
  55. +19 -17  frameworks/Rust/ohkami/benchmark_config.json
  56. +19 -0  frameworks/Rust/ohkami/config.toml
  57. +15 -9  frameworks/Rust/ohkami/ohkami.dockerfile
  58. +0 -60  frameworks/Rust/ohkami/src/components.rs
  59. +62 -85  frameworks/Rust/ohkami/src/main.rs
  60. +36 -0  frameworks/Rust/ohkami/src/models.rs
  61. +102 -0  frameworks/Rust/ohkami/src/postgres.rs
  62. +18 -0  frameworks/Rust/ohkami/src/templates.rs
  63. +80 -83  frameworks/Rust/xitca-web/Cargo.lock
  64. +6 -6  frameworks/Rust/xitca-web/Cargo.toml
  65. +15 -26  frameworks/Rust/xitca-web/src/db.rs
  66. +4 -13  frameworks/Rust/xitca-web/src/main_sync.rs
  67. +12 -27  frameworks/Rust/xitca-web/src/main_wasm.rs
  68. +1 -0  frameworks/Rust/xitca-web/src/ser.rs
  69. +1 -1  frameworks/Rust/xitca-web/xitca-web-axum.dockerfile
  70. +1 -1  frameworks/Rust/xitca-web/xitca-web-iou.dockerfile
  71. +1 -1  frameworks/Rust/xitca-web/xitca-web-sync.dockerfile
  72. +1 -1  frameworks/Rust/xitca-web/xitca-web-wasm.dockerfile
  73. +1 -1  frameworks/Rust/xitca-web/xitca-web.dockerfile
  74. +1 -1  frameworks/TypeScript/nest/package.json
  75. +2 -0  frameworks/Zig/zap/.gitignore
  76. +25 -0  frameworks/Zig/zap/README.md
  77. +26 -0  frameworks/Zig/zap/benchmark_config.json
  78. +99 -0  frameworks/Zig/zap/build.zig
  79. +21 -0  frameworks/Zig/zap/build.zig.zon
  80. +3 -0  frameworks/Zig/zap/run.sh
  81. +334 -0  frameworks/Zig/zap/src/endpoints.zig
  82. +95 -0  frameworks/Zig/zap/src/main.zig
  83. +129 -0  frameworks/Zig/zap/src/middleware.zig
  84. +78 -0  frameworks/Zig/zap/src/pool.zig
  85. +42 -0  frameworks/Zig/zap/zap.dockerfile
  86. +5 -1  toolset/databases/postgres/config.sh
  87. +0 -5  toolset/databases/postgres/create-postgres-database.sql
  88. +0 -100  toolset/databases/postgres/pg_hba.conf
  89. +8 -10  toolset/databases/postgres/postgres.dockerfile
  90. +0 -143  toolset/databases/postgres/postgresql-min.conf
  91. +8 -441  toolset/databases/postgres/postgresql.conf
  92. +0 -813  toolset/databases/postgres/postgresql.conf.sample

+ 4 - 3
frameworks/C++/userver/userver-bare.dockerfile

@@ -6,7 +6,7 @@ RUN apt update && \
 
 WORKDIR /src
 RUN git clone https://github.com/userver-framework/userver.git && \
-    cd userver && git checkout fcf0514be560f46740f8a654f2fdce5dc1cd450c
+    cd userver && git checkout c2ca5454f0b0e93dd0a2e082904dedda5cda3052
 
 COPY userver_benchmark/ ./
 RUN mkdir build && cd build && \
@@ -14,8 +14,9 @@ RUN mkdir build && cd build && \
           -DUSERVER_FEATURE_UTEST=0 \
           -DUSERVER_FEATURE_POSTGRESQL=1 \
           -DUSERVER_FEATURE_ERASE_LOG_WITH_LEVEL=warning \
-          -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=native" -DCMAKE_C_FLAGS="-march=native" \
-          -DCMAKE_CXX_COMPILER=clang++-16 -DCMAKE_C_COMPILER=clang-16 -DUSERVER_USE_LD=lld-16 -DUSERVER_LTO_CACHE=0 .. && \
+          -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=native -flto=thin" -DCMAKE_C_FLAGS="-march=native -flto=thin" \
+          -DCMAKE_CXX_COMPILER=clang++-16 -DCMAKE_C_COMPILER=clang-16 -DUSERVER_USE_LD=lld-16 \
+          -DUSERVER_LTO=0 .. && \
     make -j $(nproc)
 
 FROM builder AS runner

+ 4 - 3
frameworks/C++/userver/userver.dockerfile

@@ -6,7 +6,7 @@ RUN apt update && \
 
 WORKDIR /src
 RUN git clone https://github.com/userver-framework/userver.git && \
-    cd userver && git checkout fcf0514be560f46740f8a654f2fdce5dc1cd450c
+    cd userver && git checkout c2ca5454f0b0e93dd0a2e082904dedda5cda3052
 
 COPY userver_benchmark/ ./
 RUN mkdir build && cd build && \
@@ -14,8 +14,9 @@ RUN mkdir build && cd build && \
           -DUSERVER_FEATURE_UTEST=0 \
          -DUSERVER_FEATURE_POSTGRESQL=1 \
           -DUSERVER_FEATURE_ERASE_LOG_WITH_LEVEL=warning \
-          -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=native" -DCMAKE_C_FLAGS="-march=native" \
-          -DCMAKE_CXX_COMPILER=clang++-16 -DCMAKE_C_COMPILER=clang-16 -DUSERVER_USE_LD=lld-16 -DUSERVER_LTO_CACHE=0 .. && \
+          -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-march=native -flto=thin" -DCMAKE_C_FLAGS="-march=native -flto=thin" \
+          -DCMAKE_CXX_COMPILER=clang++-16 -DCMAKE_C_COMPILER=clang-16 -DUSERVER_USE_LD=lld-16 \
+          -DUSERVER_LTO=0 .. && \
     make -j $(nproc)
 
 FROM builder AS runner

+ 1 - 1
frameworks/C++/userver/userver_benchmark/CMakeLists.txt

@@ -15,4 +15,4 @@ add_subdirectory(userver)
 userver_setup_environment()
 
 add_executable(${PROJECT_NAME} ${SOURCES} userver_techempower.cpp)
-target_link_libraries(${PROJECT_NAME} PRIVATE userver-core userver-postgresql)
+target_link_libraries(${PROJECT_NAME} PRIVATE userver-core userver-postgresql userver-llhttp)

+ 13 - 15
frameworks/C++/userver/userver_benchmark/bare/simple_connection.cpp

@@ -3,7 +3,7 @@
 #include <array>
 
 #include <cctz/time_zone.h>
-#include <http_parser.h>
+#include <llhttp.h>
 #include <boost/container/small_vector.hpp>
 
 #include "simple_server.hpp"
@@ -18,8 +18,8 @@ namespace userver_techempower::bare {
 namespace {
 
 struct HttpParser final {
-  http_parser parser{};
-  http_parser_settings parser_settings{};
+  llhttp_t parser{};
+  llhttp_settings_t parser_settings{};
 
   std::function<void(std::string_view)> on_request_cb{};
 
@@ -27,33 +27,32 @@
 
   explicit HttpParser(std::function<void(std::string_view)> on_request_cb)
       : on_request_cb{std::move(on_request_cb)} {
-    http_parser_init(&parser, HTTP_REQUEST);
-    parser.data = this;
-
-    http_parser_settings_init(&parser_settings);
+    llhttp_settings_init(&parser_settings);
     parser_settings.on_url = HttpOnUrl;
     parser_settings.on_message_begin = HttpOnMessageBegin;
     parser_settings.on_message_complete = HttpOnMessageComplete;
+
+    llhttp_init(&parser, HTTP_REQUEST, &parser_settings);
+    parser.data = this;
   }
 
-  void Execute(const char* data, std::size_t length) {
-    http_parser_execute(&parser, &parser_settings, data, length);
+  auto Execute(const char* data, std::size_t length) {
+    return llhttp_execute(&parser, data, length);
   }
 
-  static int HttpOnUrl(http_parser* parser, const char* data,
-                       std::size_t length) {
+  static int HttpOnUrl(llhttp_t* parser, const char* data, std::size_t length) {
    auto* self = static_cast<HttpParser*>(parser->data);
     self->url.append(std::string_view{data, length});
     return 0;
   }
 
-  static int HttpOnMessageBegin(http_parser* parser) {
+  static int HttpOnMessageBegin(llhttp_t* parser) {
     auto* self = static_cast<HttpParser*>(parser->data);
     self->url.clear();
     return 0;
   }
 
-  static int HttpOnMessageComplete(http_parser* parser) {
+  static int HttpOnMessageComplete(llhttp_t* parser) {
     auto* self = static_cast<HttpParser*>(parser->data);
     self->on_request_cb(static_cast<std::string_view>(self->url));
     return 0;
@@ -192,8 +191,7 @@ void SimpleConnection::Process() {
       break;
     }
 
-    parser.Execute(buffer.data(), last_bytes_read);
-    if (parser.parser.http_errno != 0) {
+    if (parser.Execute(buffer.data(), last_bytes_read) != HPE_OK) {
      break;
     }
 

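A note on the parser change above: llhttp is the maintained successor to the retired node `http_parser`, and the diff reflects its two visible API differences. The settings struct is bound once at `llhttp_init` time (hence the reordered constructor), and `llhttp_execute` reports failures through its return value rather than an `http_errno` field on the parser. A minimal self-contained sketch of the same callback pattern, using only llhttp calls that appear in the diff:

```cpp
#include <llhttp.h>

#include <cstdio>
#include <cstring>

int main() {
  llhttp_settings_t settings{};
  llhttp_settings_init(&settings);
  // Data callbacks receive (parser, bytes, length); a non-zero return aborts
  // parsing. User state would travel through parser->data, as in the diff.
  settings.on_url = [](llhttp_t* /*parser*/, const char* at,
                       size_t length) -> int {
    std::fwrite(at, 1, length, stdout);
    std::fputc('\n', stdout);
    return 0;
  };

  llhttp_t parser{};
  // Unlike http_parser, the settings are bound here, once, at init time.
  llhttp_init(&parser, HTTP_REQUEST, &settings);

  const char request[] = "GET /plaintext HTTP/1.1\r\nHost: tfb\r\n\r\n";
  // The return value replaces http_parser's parser.http_errno check.
  return llhttp_execute(&parser, request, std::strlen(request)) == HPE_OK
             ? 0
             : 1;
}
```
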
+ 15 - 15
frameworks/C++/userver/userver_benchmark/controllers/multiple_queries/handler.cpp

@@ -37,24 +37,24 @@ std::string Handler::HandleRequestThrow(
 }
 
 std::string Handler::GetResponse(int queries) const {
-  boost::container::small_vector<db_helpers::WorldTableRow, 20> result(queries);
-  for (auto& value : result) {
-    value.id = db_helpers::GenerateRandomId();
-  }
-
-  {
+  const auto db_result = [this, queries] {
     const auto lock = semaphore_.Acquire();
 
-    auto trx =
-        pg_->Begin(db_helpers::kClusterHostType, {}, db_helpers::kDefaultPgCC);
-    for (auto& value : result) {
-      value.random_number = trx.Execute(db_helpers::kDefaultPgCC,
-                                        db_helpers::kSelectRowQuery, value.id)
-                                .AsSingleRow<db_helpers::WorldTableRow>(
-                                    userver::storages::postgres::kRowTag)
-                                .random_number;
+    auto query_queue = pg_->CreateQueryQueue(db_helpers::kClusterHostType,
+                                             db_helpers::kDefaultPgCC.execute);
+    query_queue.Reserve(queries);
+    for (std::size_t i = 0; i < static_cast<std::size_t>(queries); ++i) {
+      query_queue.Push(db_helpers::kDefaultPgCC, db_helpers::kSelectRowQuery,
+                       db_helpers::GenerateRandomId());
     }
-    trx.Commit();
+
+    return query_queue.Collect(db_helpers::kDefaultPgCC.execute);
+  }();
+
+  boost::container::small_vector<db_helpers::WorldTableRow, 20> result(queries);
+  for (std::size_t i = 0; i < static_cast<std::size_t>(queries); ++i) {
+    result[i] = db_result[i].AsSingleRow<db_helpers::WorldTableRow>(
+        userver::storages::postgres::kRowTag);
   }
 
   userver::formats::json::StringBuilder sb{};

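The rewrite above replaces a transaction that issued one SELECT per round trip with userver's QueryQueue, which pipelines the statements over a single connection and waits once for the whole batch. A condensed restatement of that pattern with explanatory comments; `pg_`, `semaphore_`, and the `db_helpers::*` names are the handler's own members and helpers, defined elsewhere in the benchmark sources, so this is a sketch rather than a standalone program:

```cpp
// Pipelined variant of "run N independent SELECTs", condensed from the diff.
const auto db_result = [this, queries] {
  const auto lock = semaphore_.Acquire();  // cap concurrent DB work

  auto query_queue = pg_->CreateQueryQueue(db_helpers::kClusterHostType,
                                           db_helpers::kDefaultPgCC.execute);
  query_queue.Reserve(queries);  // pre-size for N pipelined statements
  for (std::size_t i = 0; i < static_cast<std::size_t>(queries); ++i) {
    // Queue one parameterized SELECT per random id.
    query_queue.Push(db_helpers::kDefaultPgCC, db_helpers::kSelectRowQuery,
                     db_helpers::GenerateRandomId());
  }
  // Collect waits for the whole batch at once, not once per statement.
  return query_queue.Collect(db_helpers::kDefaultPgCC.execute);
}();
```
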
+ 32 - 25
frameworks/C++/userver/userver_benchmark/controllers/updates/handler.cpp

@@ -7,6 +7,15 @@
 
 #include <boost/container/small_vector.hpp>
 
+namespace userver::storages::postgres::io::traits {
+
+// Hijack userver's whitelist of allowed containers
+template <typename T, std::size_t Size>
+struct IsCompatibleContainer<boost::container::small_vector<T, Size>>
+    : std::true_type {};
+
+}  // namespace userver::storages::postgres::io::traits
+
 namespace userver_techempower::updates {
 
 namespace {
@@ -46,41 +55,39 @@
 }
 
 std::string Handler::GetResponse(int queries) const {
-  // userver's PG doesn't accept boost::small_vector as an input, sadly
-  std::vector<db_helpers::WorldTableRow> values(queries);
-  for (auto& value : values) {
-    value.id = db_helpers::GenerateRandomId();
+  boost::container::small_vector<int, 20> ids(queries);
+  for (auto& id : ids) {
+    id = db_helpers::GenerateRandomId();
   }
   // we have to sort ids to not deadlock in update
-  std::sort(values.begin(), values.end(),
-            [](const auto& lhs, const auto& rhs) { return lhs.id < rhs.id; });
+  std::sort(ids.begin(), ids.end(),
+            [](const auto& lhs, const auto& rhs) { return lhs < rhs; });
 
-  boost::container::small_vector<db_helpers::WorldTableRow, 20> result;
+  boost::container::small_vector<int, 20> values(queries);
+  for (auto& value : values) {
+    value = db_helpers::GenerateRandomValue();
+  }
 
-  {
+  const auto db_results = [this, &ids, &values] {
     const auto lock = semaphore_.Acquire();
 
-    auto trx =
-        pg_->Begin(db_helpers::kClusterHostType, {}, db_helpers::kDefaultPgCC);
-    for (auto& value : values) {
-      value.random_number = trx.Execute(db_helpers::kDefaultPgCC,
-                                        db_helpers::kSelectRowQuery, value.id)
-                                .AsSingleRow<db_helpers::WorldTableRow>(
-                                    userver::storages::postgres::kRowTag)
-                                .random_number;
+    auto query_queue = pg_->CreateQueryQueue(db_helpers::kClusterHostType,
+                                             db_helpers::kDefaultPgCC.execute);
+    query_queue.Reserve(ids.size() + 1 /* for the update query */);
+    for (const auto id : ids) {
+      query_queue.Push(db_helpers::kDefaultPgCC, db_helpers::kSelectRowQuery,
+                       id);
     }
 
-    // We copy values here (and hope compiler optimizes it into one memcpy call)
-    // to not serialize into json within transaction
-    result.assign(values.begin(), values.end());
+    query_queue.Push(db_helpers::kDefaultPgCC, update_query_, ids, values);
 
-    for (auto& value : values) {
-      value.random_number = db_helpers::GenerateRandomValue();
-    }
+    return query_queue.Collect(db_helpers::kDefaultPgCC.execute);
+  }();
 
-    trx.ExecuteDecomposeBulk(db_helpers::kDefaultPgCC, update_query_, values,
-                             values.size());
-    trx.Commit();
+  boost::container::small_vector<db_helpers::WorldTableRow, 20> result(queries);
+  for (std::size_t i = 0; i < result.size(); ++i) {
+    result[i] = db_results[i].AsSingleRow<db_helpers::WorldTableRow>(
+        userver::storages::postgres::kRowTag);
   }
 
   userver::formats::json::StringBuilder sb{};

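Two details of this handler deserve a note. The `IsCompatibleContainer` specialization at the top opts `boost::container::small_vector` into userver's whitelist of container types accepted as query parameters, which is what allows `ids` and `values` to be pushed directly. And the ids are sorted because concurrent bulk updates take Postgres row locks in the order they reach the rows: batches locking {1, 5} and {5, 1} can each hold one lock while waiting on the other, whereas a shared ascending order makes every wait a chain instead of a cycle. A minimal sketch of that preparation step, independent of userver:

```cpp
#include <algorithm>

#include <boost/container/small_vector.hpp>

// Sorting gives every concurrent batch the same lock-acquisition order, so
// two in-flight UPDATEs can no longer deadlock on each other's rows.
void SortForBulkUpdate(boost::container::small_vector<int, 20>& ids) {
  std::sort(ids.begin(), ids.end());
}
```
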
+ 17 - 1
frameworks/C++/userver/userver_benchmark/userver_techempower.cpp

@@ -4,6 +4,7 @@
 
 #include <userver/clients/dns/component.hpp>
 
+#include <userver/server/middlewares/configuration.hpp>
 #include <userver/storages/postgres/component.hpp>
 #include <userver/storages/secdist/component.hpp>
 #include <userver/storages/secdist/provider_component.hpp>
@@ -45,6 +46,20 @@ class NoopTracingManager final
       userver::server::http::HttpResponse&) const final {}
 };
 
+class MinimalMiddlewarePipelineBuilder final
+    : public userver::server::middlewares::PipelineBuilder {
+ public:
+  static constexpr std::string_view kName{
+      "minimal-middleware-pipeline-builder"};
+  using userver::server::middlewares::PipelineBuilder::PipelineBuilder;
+
+ private:
+  userver::server::middlewares::MiddlewaresList BuildPipeline(
+      userver::server::middlewares::MiddlewaresList) const override {
+    return {"userver-unknown-exceptions-handling-middleware"};
+  }
+};
+
 int Main(int argc, char* argv[]) {
   auto component_list =
       userver::components::MinimalServerComponentList()
@@ -63,8 +78,9 @@ int Main(int argc, char* argv[]) {
           .Append<cached_queries::WorldCacheComponent>()  // cache component
           .Append<cached_queries::Handler>()
           .Append<fortunes::Handler>()
-          // tracing tweaks
+          // tracing and metrics tweaks
           .Append<NoopTracingManager>()
+          .Append<MinimalMiddlewarePipelineBuilder>()
           // bare
          .Append<bare::SimpleRouter>()
           .Append<bare::SimpleServer>();

+ 2 - 0
frameworks/C++/userver/userver_configs/static_config.yaml

@@ -29,6 +29,7 @@ components_manager:
                 handler-defaults:
                     set_tracing_headers: false
             server-name: us
+            middleware-pipeline-builder: minimal-middleware-pipeline-builder
         simple-router:
         simple-server:
             port: 8081
@@ -62,6 +63,7 @@ components_manager:
         noop-tracing-manager:
         tracing-manager-locator:
             component-name: noop-tracing-manager
+        minimal-middleware-pipeline-builder:
 
         plaintext-handler:
             path: /plaintext

+ 5 - 1
frameworks/C/h2o/h2o.dockerfile

@@ -13,6 +13,7 @@ RUN apt-get -yqq update && \
       curl \
       flex \
       g++ \
+      libbpfcc-dev \
       libbrotli-dev \
       libcap-dev \
       libicu-dev \
@@ -27,9 +28,11 @@ RUN apt-get -yqq update && \
       make \
       ninja-build \
       pkg-config \
+      rsync \
+      ruby \
       systemtap-sdt-dev
 
-ARG H2O_VERSION=13ba727ad12dfb2338165d2bcfb2136457e33c8a
+ARG H2O_VERSION=18b175f71ede08b50d3e5ae8303dacef3ea510fc
 
 WORKDIR /tmp/h2o-build
 RUN curl -LSs "https://github.com/h2o/h2o/archive/${H2O_VERSION}.tar.gz" | \
@@ -39,6 +42,7 @@ RUN curl -LSs "https://github.com/h2o/h2o/archive/${H2O_VERSION}.tar.gz" | \
       -DCMAKE_AR=/usr/bin/gcc-ar \
       -DCMAKE_C_FLAGS="-flto -march=native -mtune=native" \
       -DCMAKE_RANLIB=/usr/bin/gcc-ranlib \
+      -DWITH_MRUBY=on \
       -G Ninja \
       -S . && \
     cmake --build build -j && \

+ 3 - 4
frameworks/Go/fiber/src/go.mod

@@ -5,7 +5,7 @@ go 1.19
 require (
 	github.com/goccy/go-json v0.10.0
 	github.com/gofiber/fiber/v2 v2.52.1
-	github.com/jackc/pgx/v5 v5.2.0
+	github.com/jackc/pgx/v5 v5.5.4
 	github.com/valyala/quicktemplate v1.7.0
 )
 
@@ -13,8 +13,8 @@ require (
 	github.com/andybalholm/brotli v1.0.5 // indirect
 	github.com/google/uuid v1.5.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
-	github.com/jackc/puddle/v2 v2.1.2 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/klauspost/compress v1.17.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
@@ -23,7 +23,6 @@ require (
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasthttp v1.51.0 // indirect
 	github.com/valyala/tcplisten v1.0.0 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
 	golang.org/x/crypto v0.17.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/sys v0.15.0 // indirect

+ 9 - 11
frameworks/Go/fiber/src/go.sum

@@ -13,12 +13,12 @@ github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
 github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8=
-github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk=
-github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg=
-github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
+github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
+github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
@@ -36,8 +36,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -47,8 +47,6 @@ github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTc
 github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
 github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
 golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
@@ -70,5 +68,5 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

+ 1 - 1
frameworks/Java/inverno/pom.xml

@@ -6,7 +6,7 @@
 	<parent>
 		<groupId>io.inverno.dist</groupId>
 		<artifactId>inverno-parent</artifactId>
-		<version>1.6.2</version>
+		<version>1.7.0</version>
 	</parent>
 	<groupId>com.techempower</groupId>
 	<artifactId>inverno-benchmark</artifactId>

+ 1 - 1
frameworks/Java/jooby/jooby-jetty.dockerfile

@@ -1,4 +1,4 @@
-FROM maven:3.9.0-eclipse-temurin-17
+FROM maven:3.9.6-eclipse-temurin-21-jammy
 WORKDIR /jooby
 COPY pom.xml pom.xml
 COPY src src

+ 1 - 1
frameworks/Java/jooby/jooby-mvc.dockerfile

@@ -1,4 +1,4 @@
-FROM maven:3.9.0-eclipse-temurin-17
+FROM maven:3.9.6-eclipse-temurin-21-jammy
 WORKDIR /jooby
 COPY pom.xml pom.xml
 COPY src src

+ 1 - 1
frameworks/Java/jooby/jooby-netty.dockerfile

@@ -1,4 +1,4 @@
-FROM maven:3.9.0-eclipse-temurin-17
+FROM maven:3.9.6-eclipse-temurin-21-jammy
 WORKDIR /jooby
 COPY pom.xml pom.xml
 COPY src src

+ 1 - 1
frameworks/Java/jooby/jooby-pgclient.dockerfile

@@ -1,4 +1,4 @@
-FROM maven:3.9.0-eclipse-temurin-17
+FROM maven:3.9.6-eclipse-temurin-21-jammy
 WORKDIR /jooby
 COPY pom.xml pom.xml
 COPY src src

+ 1 - 1
frameworks/Java/jooby/jooby.dockerfile

@@ -1,4 +1,4 @@
-FROM maven:3.9.0-eclipse-temurin-17
+FROM maven:3.9.6-eclipse-temurin-21-jammy
 WORKDIR /jooby
 COPY pom.xml pom.xml
 COPY src src

+ 12 - 12
frameworks/Java/jooby/pom.xml

@@ -11,12 +11,12 @@
   <name>jooby</name>
 
   <properties>
-    <jooby.version>3.0.5</jooby.version>
-    <dsl-json.version>1.10.0</dsl-json.version>
+    <jooby.version>3.0.8</jooby.version>
+    <dsl-json.version>2.0.2</dsl-json.version>
     <postgresql.version>42.7.2</postgresql.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <maven.compiler.source>17</maven.compiler.source>
-    <maven.compiler.target>17</maven.compiler.target>
+    <maven.compiler.source>21</maven.compiler.source>
+    <maven.compiler.target>21</maven.compiler.target>
 
     <!-- Startup class -->
     <application.class>com.techempower.App</application.class>
@@ -41,7 +41,7 @@
     <dependency>
       <groupId>com.mysql</groupId>
       <artifactId>mysql-connector-j</artifactId>
-      <version>8.0.33</version>
+      <version>8.3.0</version>
     </dependency>
 
 
@@ -55,13 +55,13 @@
     <dependency>
       <groupId>io.vertx</groupId>
       <artifactId>vertx-pg-client</artifactId>
-      <version>4.4.4</version>
+      <version>4.5.4</version>
     </dependency>
 
     <!-- json -->
     <dependency>
       <groupId>com.dslplatform</groupId>
-      <artifactId>dsl-json-java8</artifactId>
+      <artifactId>dsl-json</artifactId>
       <version>${dsl-json.version}</version>
     </dependency>
   </dependencies>
@@ -72,7 +72,7 @@
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>
-        <version>3.3.0</version>
+        <version>3.5.0</version>
         <executions>
           <execution>
             <id>add-source</id>
@@ -91,7 +91,7 @@
       <plugin>
         <groupId>com.fizzed</groupId>
         <artifactId>rocker-maven-plugin</artifactId>
-        <version>1.3.0</version>
+        <version>1.4.0</version>
         <executions>
           <execution>
             <id>generate-rocker-templates</id>
@@ -110,7 +110,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.11.0</version>
+        <version>3.12.1</version>
         <configuration>
           <annotationProcessorPaths>
             <path>
@@ -120,7 +120,7 @@
             </path>
             <path>
               <groupId>com.dslplatform</groupId>
-              <artifactId>dsl-json-java8</artifactId>
+              <artifactId>dsl-json</artifactId>
               <version>${dsl-json.version}</version>
             </path>
           </annotationProcessorPaths>
@@ -130,7 +130,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-shade-plugin</artifactId>
-        <version>3.4.1</version>
+        <version>3.5.2</version>
         <executions>
           <execution>
             <id>uber-jar</id>

+ 1 - 1
frameworks/Java/netty/pom.xml

@@ -11,7 +11,7 @@
 	<properties>
 		<maven.compiler.source>11</maven.compiler.source>
 		<maven.compiler.target>11</maven.compiler.target>
-		<netty.version>4.1.92.Final</netty.version>
+		<netty.version>4.1.108.Final</netty.version>
 		<io_uring.version>0.0.21.Final</io_uring.version>
 	</properties>
 

+ 48 - 0
frameworks/JavaScript/mesh/README.md

@@ -0,0 +1,48 @@
+# Mesh Benchmarking Test
+
+This is the [`Mesh`](https://github.com/ionited/mesh) portion of a [benchmarking test suite](../) comparing a variety of web development platforms.
+
+Information about Mesh can be found at https://github.com/ionited/mesh
+
+## Database Drivers
+
+There are individual handlers for each DB approach. The logic for each of them is found here:
+
+* [MySQL](drivers/mysql.js)
+* [MongoDB](drivers/mongodb.js)
+* [PostgreSQL](drivers/postgres.js)
+
+There are **no database endpoints** or drivers attached by default.<br>
+To initialize the application with one of these, run any _one_ of the following commands:
+
+```sh
+$ DATABASE=mysql node app.js
+$ DATABASE=mongodb node app.js
+$ DATABASE=postgres node app.js
+```
+
+## Test Endpoints
+
+> Visit the test requirements [here](https://github.com/TechEmpower/FrameworkBenchmarks/wiki/Project-Information-Framework-Tests-Overview)
+
+```sh
+$ curl localhost:8080/json
+$ curl localhost:8080/plaintext
+
+# The following are only available w/ DATABASE
+
+$ curl localhost:8080/db
+$ curl localhost:8080/fortunes
+
+$ curl localhost:8080/updates?queries=
+$ curl localhost:8080/updates?queries=2
+$ curl localhost:8080/updates?queries=1000
+$ curl localhost:8080/updates?queries=foo
+$ curl localhost:8080/updates?queries=0
+
+$ curl localhost:8080/queries?queries=
+$ curl localhost:8080/queries?queries=2
+$ curl localhost:8080/queries?queries=1000
+$ curl localhost:8080/queries?queries=foo
+$ curl localhost:8080/queries?queries=0
+```

+ 18 - 0
frameworks/JavaScript/mesh/app.js

@@ -0,0 +1,18 @@
+const cluster = require("cluster");
+const numCPUs = require("os").cpus().length;
+
+if (cluster.isPrimary) {
+  console.log(`Primary ${process.pid} is running`);
+
+  // Fork workers.
+  for (let i = 0; i < numCPUs; i++) {
+    cluster.fork();
+  }
+
+  cluster.on('exit', (worker, code, signal) => {
+    console.log(`worker ${worker.process.pid} died`);
+  });
+} else {
+  // worker task
+  require("./server");
+}

+ 92 - 0
frameworks/JavaScript/mesh/benchmark_config.json

@@ -0,0 +1,92 @@
+{
+  "framework": "mesh",
+  "tests": [
+    {
+      "default": {
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Platform",
+        "database": "None",
+        "framework": "mesh",
+        "language": "JavaScript",
+        "flavor": "NodeJS",
+        "orm": "Raw",
+        "platform": "nodejs",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Mesh",
+        "notes": "",
+        "versus": "nodejs"
+      },
+      "mysql": {
+        "dockerfile": "mesh-mysql.dockerfile",
+        "db_url": "/db",
+        "query_url": "/queries?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/updates?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Platform",
+        "database": "MySQL",
+        "framework": "mesh",
+        "language": "JavaScript",
+        "flavor": "NodeJS",
+        "orm": "Raw",
+        "platform": "nodejs",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Mesh",
+        "notes": "",
+        "versus": "nodejs"
+      },
+      "postgres": {
+        "dockerfile": "mesh-postgres.dockerfile",
+        "db_url": "/db",
+        "query_url": "/queries?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/updates?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Platform",
+        "database": "Postgres",
+        "framework": "mesh",
+        "language": "JavaScript",
+        "flavor": "NodeJS",
+        "orm": "Raw",
+        "platform": "None",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Mesh",
+        "notes": "",
+        "versus": "nodejs"
+      },
+      "mongodb": {
+        "dockerfile": "mesh-mongodb.dockerfile",
+        "db_url": "/db",
+        "query_url": "/queries?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/updates?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Platform",
+        "database": "MongoDB",
+        "framework": "mesh",
+        "language": "JavaScript",
+        "flavor": "NodeJS",
+        "orm": "Raw",
+        "platform": "None",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Mesh",
+        "notes": "",
+        "versus": "nodejs"
+      }
+    }
+  ]
+}

+ 16 - 0
frameworks/JavaScript/mesh/drivers/mongodb.js

@@ -0,0 +1,16 @@
+const { MongoClient } = require('mongodb');
+
+let World, Fortune;
+const projection = { _id:0 };
+
+MongoClient.connect('mongodb://tfb-database:27017', { useNewUrlParser:true }, (err, ctx) => {
+	const DB = ctx.db('hello_world');
+	Fortune = DB.collection('fortune');
+	World = DB.collection('world');
+});
+
+exports.fortunes = () => Fortune.find({}, { projection }).toArray();
+
+exports.find = id => World.findOne({ id }, { projection });
+
+exports.update = obj => World.replaceOne({ id:obj.id }, obj);

+ 24 - 0
frameworks/JavaScript/mesh/drivers/mysql.js

@@ -0,0 +1,24 @@
+const { createConnection } = require('mysql');
+
+const connection = createConnection({
+	host: 'tfb-database',
+	user: 'benchmarkdbuser',
+	password: 'benchmarkdbpass',
+	database: 'hello_world'
+});
+
+connection.connect();
+
+function query(text, values) {
+	return new Promise((res, rej) => {
+		connection.query(text, values || [], (err, results) => {
+			return err ? rej(err) : res(results);
+		});
+	});
+}
+
+exports.fortunes = () => query('SELECT * FROM fortune');
+
+exports.find = id => query('SELECT * FROM world WHERE id = ?', [id]).then(arr => arr[0]);
+
+exports.update = obj => query('UPDATE world SET randomNumber = ? WHERE id = ?', [obj.randomNumber, obj.id]);

+ 20 - 0
frameworks/JavaScript/mesh/drivers/postgres.js

@@ -0,0 +1,20 @@
+const { Client } = require('pg');
+
+const client = new Client({
+	host: 'tfb-database',
+	user: 'benchmarkdbuser',
+	password: 'benchmarkdbpass',
+	database: 'hello_world'
+});
+
+client.connect();
+
+function query(text, values) {
+	return client.query(text, values || []).then(r => r.rows);
+}
+
+exports.fortunes = () => query('SELECT * FROM fortune');
+
+exports.find = id => query('SELECT * FROM world WHERE id = $1', [id]).then(arr => arr[0]);
+
+exports.update = obj => query('UPDATE world SET randomNumber = $1 WHERE id = $2', [obj.randomNumber, obj.id]);

+ 12 - 0
frameworks/JavaScript/mesh/mesh-mongodb.dockerfile

@@ -0,0 +1,12 @@
+FROM node:20-slim
+
+COPY ./ ./
+
+RUN npm install
+
+ENV NODE_ENV production
+ENV DATABASE mongodb
+
+EXPOSE 8080
+
+CMD ["node", "app.js"]

+ 12 - 0
frameworks/JavaScript/mesh/mesh-mysql.dockerfile

@@ -0,0 +1,12 @@
+FROM node:20-slim
+
+COPY ./ ./
+
+RUN npm install
+
+ENV NODE_ENV production
+ENV DATABASE mysql
+
+EXPOSE 8080
+
+CMD ["node", "app.js"]

+ 12 - 0
frameworks/JavaScript/mesh/mesh-postgres.dockerfile

@@ -0,0 +1,12 @@
+FROM node:20-slim
+
+COPY ./ ./
+
+RUN npm install
+
+ENV NODE_ENV production
+ENV DATABASE postgres
+
+EXPOSE 8080
+
+CMD ["node", "app.js"]

+ 11 - 0
frameworks/JavaScript/mesh/mesh.dockerfile

@@ -0,0 +1,11 @@
+FROM node:20-slim
+
+COPY ./ ./
+
+RUN npm install
+
+ENV NODE_ENV production
+
+EXPOSE 8080
+
+CMD ["node", "app.js"]

+ 369 - 0
frameworks/JavaScript/mesh/package-lock.json

@@ -0,0 +1,369 @@
+{
+  "name": "mesh",
+  "version": "0.0.1",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "mesh",
+      "version": "0.0.1",
+      "dependencies": {
+        "@ionited/mesh": "^0.6.0",
+        "mongodb": "^3.7.4",
+        "mysql": "^2.18.1",
+        "pg": "^8.11.3"
+      }
+    },
+    "node_modules/@ionited/mesh": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/@ionited/mesh/-/mesh-0.6.0.tgz",
+      "integrity": "sha512-SnRY0JML4Sa7WZ3J6hFibti/euRONuagTOE0QnEbwsMTzPFzVDl1fdQTO4NZKcDPVyBC74XdEUTw9+AEz+ZFlA==",
+      "dependencies": {
+        "uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.43.0"
+      }
+    },
+    "node_modules/bignumber.js": {
+      "version": "9.0.0",
+      "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.0.tgz",
+      "integrity": "sha512-t/OYhhJ2SD+YGBQcjY8GzzDHEk9f3nerxjtfa6tlMXfe7frs/WozhvCNoGvpM0P3bNf3Gq5ZRMlGr5f3r4/N8A==",
+      "engines": {
+        "node": "*"
+      }
+    },
+    "node_modules/bl": {
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/bl/-/bl-2.2.1.tgz",
+      "integrity": "sha512-6Pesp1w0DEX1N550i/uGV/TqucVL4AM/pgThFSN/Qq9si1/DF9aIHs1BxD8V/QU0HoeHO6cQRTAuYnLPKq1e4g==",
+      "dependencies": {
+        "readable-stream": "^2.3.5",
+        "safe-buffer": "^5.1.1"
+      }
+    },
+    "node_modules/bson": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/bson/-/bson-1.1.6.tgz",
+      "integrity": "sha512-EvVNVeGo4tHxwi8L6bPj3y3itEvStdwvvlojVxxbyYfoaxJ6keLgrTuKdyfEAszFK+H3olzBuafE0yoh0D1gdg==",
+      "engines": {
+        "node": ">=0.6.19"
+      }
+    },
+    "node_modules/buffer-writer": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz",
+      "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/core-util-is": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+      "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
+    },
+    "node_modules/denque": {
+      "version": "1.5.1",
+      "resolved": "https://registry.npmjs.org/denque/-/denque-1.5.1.tgz",
+      "integrity": "sha512-XwE+iZ4D6ZUB7mfYRMb5wByE8L74HCn30FBN7sWnXksWc1LO1bPDl67pBR9o/kC4z/xSNAwkMYcGgqDV3BE3Hw==",
+      "engines": {
+        "node": ">=0.10"
+      }
+    },
+    "node_modules/inherits": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+      "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+    },
+    "node_modules/isarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+      "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="
+    },
+    "node_modules/memory-pager": {
+      "version": "1.5.0",
+      "resolved": "https://registry.npmjs.org/memory-pager/-/memory-pager-1.5.0.tgz",
+      "integrity": "sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg==",
+      "optional": true
+    },
+    "node_modules/mongodb": {
+      "version": "3.7.4",
+      "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-3.7.4.tgz",
+      "integrity": "sha512-K5q8aBqEXMwWdVNh94UQTwZ6BejVbFhh1uB6c5FKtPE9eUMZPUO3sRZdgIEcHSrAWmxzpG/FeODDKL388sqRmw==",
+      "dependencies": {
+        "bl": "^2.2.1",
+        "bson": "^1.1.4",
+        "denque": "^1.4.1",
+        "optional-require": "^1.1.8",
+        "safe-buffer": "^5.1.2"
+      },
+      "engines": {
+        "node": ">=4"
+      },
+      "optionalDependencies": {
+        "saslprep": "^1.0.0"
+      },
+      "peerDependenciesMeta": {
+        "aws4": {
+          "optional": true
+        },
+        "bson-ext": {
+          "optional": true
+        },
+        "kerberos": {
+          "optional": true
+        },
+        "mongodb-client-encryption": {
+          "optional": true
+        },
+        "mongodb-extjson": {
+          "optional": true
+        },
+        "snappy": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/mysql": {
+      "version": "2.18.1",
+      "resolved": "https://registry.npmjs.org/mysql/-/mysql-2.18.1.tgz",
+      "integrity": "sha512-Bca+gk2YWmqp2Uf6k5NFEurwY/0td0cpebAucFpY/3jhrwrVGuxU2uQFCHjU19SJfje0yQvi+rVWdq78hR5lig==",
+      "dependencies": {
+        "bignumber.js": "9.0.0",
+        "readable-stream": "2.3.7",
+        "safe-buffer": "5.1.2",
+        "sqlstring": "2.3.1"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/mysql/node_modules/sqlstring": {
+      "version": "2.3.1",
+      "resolved": "https://registry.npmjs.org/sqlstring/-/sqlstring-2.3.1.tgz",
+      "integrity": "sha512-ooAzh/7dxIG5+uDik1z/Rd1vli0+38izZhGzSa34FwR7IbelPWCCKSNIl8jlL/F7ERvy8CB2jNeM1E9i9mXMAQ==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/optional-require": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/optional-require/-/optional-require-1.1.8.tgz",
+      "integrity": "sha512-jq83qaUb0wNg9Krv1c5OQ+58EK+vHde6aBPzLvPPqJm89UQWsvSuFy9X/OSNJnFeSOKo7btE0n8Nl2+nE+z5nA==",
+      "dependencies": {
+        "require-at": "^1.0.6"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/packet-reader": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz",
+      "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ=="
+    },
+    "node_modules/pg": {
+      "version": "8.11.3",
+      "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz",
+      "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==",
+      "dependencies": {
+        "buffer-writer": "2.0.0",
+        "packet-reader": "1.0.0",
+        "pg-connection-string": "^2.6.2",
+        "pg-pool": "^3.6.1",
+        "pg-protocol": "^1.6.0",
+        "pg-types": "^2.1.0",
+        "pgpass": "1.x"
+      },
+      "engines": {
+        "node": ">= 8.0.0"
+      },
+      "optionalDependencies": {
+        "pg-cloudflare": "^1.1.1"
+      },
+      "peerDependencies": {
+        "pg-native": ">=3.0.1"
+      },
+      "peerDependenciesMeta": {
+        "pg-native": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/pg-cloudflare": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz",
+      "integrity": "sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==",
+      "optional": true
+    },
+    "node_modules/pg-connection-string": {
+      "version": "2.6.2",
+      "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.2.tgz",
+      "integrity": "sha512-ch6OwaeaPYcova4kKZ15sbJ2hKb/VP48ZD2gE7i1J+L4MspCtBMAx8nMgz7bksc7IojCIIWuEhHibSMFH8m8oA=="
+    },
+    "node_modules/pg-int8": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
+      "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
+      "engines": {
+        "node": ">=4.0.0"
+      }
+    },
+    "node_modules/pg-pool": {
+      "version": "3.6.1",
+      "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.6.1.tgz",
+      "integrity": "sha512-jizsIzhkIitxCGfPRzJn1ZdcosIt3pz9Sh3V01fm1vZnbnCMgmGl5wvGGdNN2EL9Rmb0EcFoCkixH4Pu+sP9Og==",
+      "peerDependencies": {
+        "pg": ">=8.0"
+      }
+    },
+    "node_modules/pg-protocol": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.6.0.tgz",
+      "integrity": "sha512-M+PDm637OY5WM307051+bsDia5Xej6d9IR4GwJse1qA1DIhiKlksvrneZOYQq42OM+spubpcNYEo2FcKQrDk+Q=="
+    },
+    "node_modules/pg-types": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz",
+      "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==",
+      "dependencies": {
+        "pg-int8": "1.0.1",
+        "postgres-array": "~2.0.0",
+        "postgres-bytea": "~1.0.0",
+        "postgres-date": "~1.0.4",
+        "postgres-interval": "^1.1.0"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/pgpass": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz",
+      "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==",
+      "dependencies": {
+        "split2": "^4.1.0"
+      }
+    },
+    "node_modules/postgres-array": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
+      "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/postgres-bytea": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
+      "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/postgres-date": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz",
+      "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/postgres-interval": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
+      "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
+      "dependencies": {
+        "xtend": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/process-nextick-args": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+      "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+    },
+    "node_modules/readable-stream": {
+      "version": "2.3.7",
+      "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+      "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+      "dependencies": {
+        "core-util-is": "~1.0.0",
+        "inherits": "~2.0.3",
+        "isarray": "~1.0.0",
+        "process-nextick-args": "~2.0.0",
+        "safe-buffer": "~5.1.1",
+        "string_decoder": "~1.1.1",
+        "util-deprecate": "~1.0.1"
+      }
+    },
+    "node_modules/require-at": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/require-at/-/require-at-1.0.6.tgz",
+      "integrity": "sha512-7i1auJbMUrXEAZCOQ0VNJgmcT2VOKPRl2YGJwgpHpC9CE91Mv4/4UYIUm4chGJaI381ZDq1JUicFii64Hapd8g==",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+    },
+    "node_modules/saslprep": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/saslprep/-/saslprep-1.0.3.tgz",
+      "integrity": "sha512-/MY/PEMbk2SuY5sScONwhUDsV2p77Znkb/q3nSVstq/yQzYJOH/Azh29p9oJLsl3LnQwSvZDKagDGBsBwSooag==",
+      "optional": true,
+      "dependencies": {
+        "sparse-bitfield": "^3.0.3"
+      },
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/sparse-bitfield": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/sparse-bitfield/-/sparse-bitfield-3.0.3.tgz",
+      "integrity": "sha512-kvzhi7vqKTfkh0PZU+2D2PIllw2ymqJKujUcyPMd9Y75Nv4nPbGJZXNhxsgdQab2BmlDct1YnfQCguEvHr7VsQ==",
+      "optional": true,
+      "dependencies": {
+        "memory-pager": "^1.0.2"
+      }
+    },
+    "node_modules/split2": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz",
+      "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==",
+      "engines": {
+        "node": ">= 10.x"
+      }
+    },
+    "node_modules/string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "dependencies": {
+        "safe-buffer": "~5.1.0"
+      }
+    },
+    "node_modules/util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
+    },
+    "node_modules/uWebSockets.js": {
+      "version": "20.43.0",
+      "resolved": "git+ssh://[email protected]/uNetworking/uWebSockets.js.git#1977b5039938ad863d42fc4958d48c17e5a1fa06",
+      "license": "Apache-2.0"
+    },
+    "node_modules/xtend": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+      "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
+      "engines": {
+        "node": ">=0.4"
+      }
+    }
+  }
+}

+ 13 - 0
frameworks/JavaScript/mesh/package.json

@@ -0,0 +1,13 @@
+{
+  "name": "mesh",
+  "version": "0.0.1",
+  "description": "Mesh tests for TechEmpower Framework Benchmarks.",
+  "main": "app.js",
+  "private": true,
+  "dependencies": {
+    "@ionited/mesh": "^0.6.0",
+    "mongodb": "^3.7.4",
+    "mysql": "^2.18.1",
+    "pg": "^8.11.3"
+  }
+}

+ 104 - 0
frameworks/JavaScript/mesh/server.js

@@ -0,0 +1,104 @@
+const db = process.env.DATABASE;
+
+const { App } = require('@ionited/mesh');
+
+const addHeaders = (res, contentType) => res.header('Content-Type', contentType).header('Server', 'Mesh');
+
+const escapeHTMLRules = {
+  "&": "&#38;",
+  "<": "&#60;",
+  ">": "&#62;",
+  '"': "&#34;",
+  "'": "&#39;",
+  "/": "&#47;",
+};
+
+const unsafeHTMLMatcher = /[&<>"'\/]/g;
+
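+// Escapes HTML-special characters in fortune messages; returns the input unchanged when nothing needs escaping.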
+const escape = text => {
+  if (unsafeHTMLMatcher.test(text) === false) return text;
+  return text.replace(unsafeHTMLMatcher, m => escapeHTMLRules[m] || m);
+};
+
+const random = () => Math.floor(Math.random() * 1e4) + 1;
+
+const app = new App();
+
+app
+
+.get('/json', (_, res) => {
+  addHeaders(res, 'application/json');
+
+  res.json({ message: 'Hello, World!' });
+})
+
+.get('/plaintext', (_, res) => {
+  addHeaders(res, 'text/plain');
+
+  res.send('Hello, World!');
+});
+
+if (db) {
+  const DRIVER = require(`./drivers/${db}`);
+
+  app
+  
+  .get('/db', async (_, res) => {
+    addHeaders(res, 'application/json');
+
+    res.json(await DRIVER.find(random()));
+  })
+  
+  .get('/queries', async (req, res) => {
+    const { queries } = req.query();
+
+    // TFB requires clamping the count to 1..500; guard the lower bound against zero and negatives too.
+    const count = Math.min(Math.max(parseInt(queries) || 1, 1), 500);
+
+    const arr = [];
+
+    for (let i = 0; i < count; i++) arr.push(await DRIVER.find(random()));
+
+    addHeaders(res, 'application/json');
+
+    res.json(arr);
+  })
+  
+  .get('/fortunes', async (_, res) => {
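+    // Per the TFB fortunes rules, add one fortune at request time, then sort by message.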
+    const items = [{
+      id: 0,
+      message: 'Additional fortune added at request time.'
+    }, ...await DRIVER.fortunes()].sort((a, b) => a.message.localeCompare(b.message));
+
+    let html = '<!DOCTYPE html><html><head><title>Fortunes</title></head><body><table><tr><th>id</th><th>message</th></tr>';
+
+    for (let i = 0; i < items.length; i++) html += `<tr><td>${items[i].id}</td><td>${escape(items[i].message)}</td></tr>`;
+
+    html += '</table></body></html>';
+
+    addHeaders(res, 'text/html; charset=utf-8');
+
+    res.send(html);
+  })
+  
+  .get('/updates', async (req, res) => {
+    const { queries } = req.query();
+
+    // TFB requires clamping the count to 1..500; guard the lower bound against zero and negatives too.
+    const count = Math.min(Math.max(parseInt(queries) || 1, 1), 500);
+
+    const arr = [];
+
+    for (let i = 0; i < count; i++) arr.push(await DRIVER.find(random()));
+
+    for (let i = 0; i < count; i++) {
+      arr[i].randomNumber = random();
+      
+      await DRIVER.update(arr[i]);
+    }
+
+    addHeaders(res, 'application/json');
+
+    res.json(arr);
+  });
+}
+
+app.listen(8080);

+ 2 - 2
frameworks/Kotlin/pellet/pellet.dockerfile

@@ -1,10 +1,10 @@
-FROM gradle:jdk18 as gradle
+FROM gradle:jdk21 as gradle
 WORKDIR /sample
 COPY sample/build.gradle.kts build.gradle.kts
 COPY sample/src src
 RUN gradle clean shadowJar --no-daemon
 
-FROM openjdk:18-jdk-buster
+FROM openjdk:21-jdk-buster
 WORKDIR /sample
 COPY --from=gradle /sample/build/libs/sample-1.0.0-all.jar app.jar
 

+ 9 - 13
frameworks/Kotlin/pellet/sample/build.gradle.kts

@@ -1,8 +1,8 @@
 plugins {
     application
     id("com.github.johnrengelman.shadow") version "7.1.0"
-    kotlin("jvm") version "1.7.10"
-    kotlin("plugin.serialization") version "1.7.10"
+    kotlin("jvm") version "1.9.23"
+    kotlin("plugin.serialization") version "1.9.23"
     id("nu.studer.rocker") version "3.0.4"
 }
 
@@ -25,31 +25,27 @@ rocker {
 }
 
 dependencies {
-    implementation(platform("dev.pellet:pellet-bom:0.0.15"))
+    implementation(platform("dev.pellet:pellet-bom:0.0.16"))
     implementation("dev.pellet:pellet-server")
     implementation("dev.pellet:pellet-logging")
     implementation("org.slf4j:slf4j-api:1.7.36")
-    implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.4.0-RC")
+    implementation("org.jetbrains.kotlinx:kotlinx-serialization-json:1.6.3")
     implementation(platform(kotlin("bom")))
     implementation(kotlin("stdlib-jdk8"))
-    implementation(platform("org.jetbrains.kotlinx:kotlinx-coroutines-bom:1.6.4"))
-    implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core")
-    implementation("org.jetbrains.kotlinx:kotlinx-coroutines-jdk8")
-    implementation("io.vertx:vertx-pg-client:4.3.2")
+    implementation("io.vertx:vertx-pg-client:4.5.5")
+    implementation("io.vertx:vertx-lang-kotlin:4.5.5")
    implementation("com.ongres.scram:client:2.1")
-    implementation("io.vertx:vertx-lang-kotlin:4.3.2")
-    implementation("io.vertx:vertx-lang-kotlin-coroutines:4.3.2")
 }
 
 java {
     toolchain {
-        sourceCompatibility = JavaVersion.VERSION_18
-        targetCompatibility = JavaVersion.VERSION_18
+        sourceCompatibility = JavaVersion.VERSION_21
+        targetCompatibility = JavaVersion.VERSION_21
     }
 }
 
 tasks.withType<org.jetbrains.kotlin.gradle.tasks.KotlinCompile> {
-    kotlinOptions.jvmTarget = "18"
+    kotlinOptions.jvmTarget = "21"
 }
 
 application {

+ 8 - 9
frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/Benchmark.kt

@@ -10,7 +10,6 @@ import dev.pellet.server.PelletConnector
 import dev.pellet.server.codec.mime.MediaType
 import dev.pellet.server.responder.http.PelletHTTPRouteContext
 import dev.pellet.server.routing.http.HTTPRouteResponse
-import kotlinx.coroutines.runBlocking
 import kotlinx.serialization.json.Json
 import java.time.Instant
 import java.time.ZoneId
@@ -25,7 +24,7 @@ val jsonEncoder = Json {
     prettyPrint = false
 }
 
-fun main() = runBlocking {
+fun main() {
     val sharedRouter = httpRouter {
         get("/plaintext", ::handlePlain)
         get("/json", ::handleJson)
@@ -44,14 +43,14 @@ fun main() = runBlocking {
             router = sharedRouter
         }
     }
-    pellet.start().join()
+    pellet.start()
 }
 
 val dateFormatter = DateTimeFormatter
     .ofPattern("EEE, dd MMM yyyy HH:mm:ss z", Locale.ENGLISH)
     .withZone(ZoneId.of("GMT"))
 
-private suspend fun handlePlain(
+private fun handlePlain(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     return HTTPRouteResponse.Builder()
@@ -67,7 +66,7 @@ data class ResponseBody(
     val message: String
 )
 
-private suspend fun handleJson(
+private fun handleJson(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     val responseBody = ResponseBody(message = "Hello, World!")
@@ -81,7 +80,7 @@ private suspend fun handleJson(
 
 private val repository = TFBRepository()
 
-private suspend fun handleDb(
+private fun handleDb(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     val result = repository.fetchWorld()
@@ -93,7 +92,7 @@ private suspend fun handleDb(
         .build()
 }
 
-private suspend fun handleQuery(
+private fun handleQuery(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     val rawQueries = context.firstQueryParameter("queries").getOrNull()
@@ -110,7 +109,7 @@ private suspend fun handleQuery(
         .build()
 }
 
-private suspend fun handleUpdates(
+private fun handleUpdates(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     val rawQueries = context.firstQueryParameter("queries").getOrNull()
@@ -133,7 +132,7 @@ private suspend fun handleUpdates(
         .build()
 }
 
-private suspend fun handleFortunes(
+private fun handleFortunes(
     context: PelletHTTPRouteContext
 ): HTTPRouteResponse {
     val newFortune = Fortune(0, "Additional fortune added at request time.")

+ 1 - 1
frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/FortuneDAO.kt

@@ -2,5 +2,5 @@ package benchmark.data
 
 interface FortuneDAO {
 
-    suspend fun fetchFortunes(): List<Fortune>
+    fun fetchFortunes(): List<Fortune>
 }

+ 17 - 9
frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/TFBRepository.kt

@@ -1,8 +1,7 @@
 package benchmark.data
 
-import io.vertx.kotlin.coroutines.await
+import io.vertx.pgclient.PgBuilder
 import io.vertx.pgclient.PgConnectOptions
-import io.vertx.pgclient.PgPool
 import io.vertx.sqlclient.PoolOptions
 import io.vertx.sqlclient.Tuple
 import java.util.concurrent.ThreadLocalRandom
@@ -20,14 +19,19 @@ class TFBRepository: WorldDAO, FortuneDAO {
         }
 
     private val poolOptions = PoolOptions()
-    private val client = PgPool.client(connectOptions, poolOptions)
+    private val client = PgBuilder.client()
+        .with(poolOptions)
+        .connectingTo(connectOptions)
+        .build()
 
-    override suspend fun fetchWorld(): WorldDTO {
+    override fun fetchWorld(): WorldDTO {
         val worldId = ThreadLocalRandom.current().nextInt(1, 10001)
         val result = client
             .preparedQuery("select id, randomNumber from world where id = $1")
             .execute(Tuple.of(worldId))
-            .await()
+            .toCompletionStage()
+            .toCompletableFuture()
+            .get()
         val row = result.first()
         return WorldDTO(
             row.getInteger(0),
@@ -35,20 +39,24 @@
         )
     }
 
-    override suspend fun updateWorlds(worlds: List<WorldDTO>) {
+    override fun updateWorlds(worlds: List<WorldDTO>) {
         val batch = worlds.map {
             Tuple.of(it.id, it.randomNumber)
         }
         client
             .preparedQuery("update world set randomNumber = $1 where id = $2")
             .executeBatch(batch)
-            .await()
+            .toCompletionStage()
+            .toCompletableFuture()
+            .get()
     }
 
-    override suspend fun fetchFortunes(): List<Fortune> {
+    override fun fetchFortunes(): List<Fortune> {
         val results = client.preparedQuery("select id, message from fortune")
             .execute()
-            .await()
+            .toCompletionStage()
+            .toCompletableFuture()
+            .get()
         return results.map {
             Fortune(
                 it.getInteger(0),

+ 2 - 2
frameworks/Kotlin/pellet/sample/src/main/kotlin/benchmark/data/WorldDAO.kt

@@ -2,6 +2,6 @@ package benchmark.data
 
 interface WorldDAO {
 
-    suspend fun fetchWorld(): WorldDTO
-    suspend fun updateWorlds(worlds: List<WorldDTO>)
+    fun fetchWorld(): WorldDTO
+    fun updateWorlds(worlds: List<WorldDTO>)
 }

+ 5 - 1
frameworks/PHP/php/php-h2o.dockerfile

@@ -4,7 +4,7 @@ ARG H2O_PREFIX=/opt/h2o
 
 FROM "ubuntu:${UBUNTU_VERSION}" AS compile
 
-ARG H2O_VERSION=13ba727ad12dfb2338165d2bcfb2136457e33c8a
+ARG H2O_VERSION=18b175f71ede08b50d3e5ae8303dacef3ea510fc
 
 ARG DEBIAN_FRONTEND=noninteractive
 ARG H2O_PREFIX
@@ -14,6 +14,7 @@ RUN apt-get -yqq update && \
       cmake \
       curl \
       g++ \
+      libbpfcc-dev \
      libbrotli-dev \
       libcap-dev \
       libssl-dev \
@@ -23,6 +24,8 @@ RUN apt-get -yqq update && \
       libz-dev \
       ninja-build \
       pkg-config \
+      rsync \
+      ruby \
       systemtap-sdt-dev && \
     curl -LSs "https://github.com/h2o/h2o/archive/${H2O_VERSION}.tar.gz" | \
       tar --strip-components=1 -xz && \
@@ -32,6 +35,7 @@ RUN apt-get -yqq update && \
      -DCMAKE_C_FLAGS="-flto -march=native -mtune=native" \
       -DCMAKE_INSTALL_PREFIX="${H2O_PREFIX}" \
       -DCMAKE_RANLIB=/usr/bin/gcc-ranlib \
+      -DWITH_MRUBY=on \
       -G Ninja \
       -S . && \
     cmake --build build -j && \

+ 68 - 13
frameworks/Python/async-worker/Pipfile.lock

@@ -298,33 +298,72 @@
         },
         "black": {
             "hashes": [
-                "sha256:23695358dbcb3deafe7f0a3ad89feee5999a46be5fec21f4f1d108be0bcdb3b1",
-                "sha256:8a60071a0043876a4ae96e6c69bd3a127dad2c1ca7c8083573eb82f92705d008"
+                "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f",
+                "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93",
+                "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11",
+                "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0",
+                "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9",
+                "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5",
+                "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213",
+                "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d",
+                "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7",
+                "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837",
+                "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f",
+                "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395",
+                "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995",
+                "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f",
+                "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597",
+                "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959",
+                "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5",
+                "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb",
+                "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4",
+                "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7",
+                "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd",
+                "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"
             ],
             "index": "pypi",
-            "version": "==21.5b1"
+            "markers": "python_version >= '3.8'",
+            "version": "==24.3.0"
         },
         "click": {
             "hashes": [
-                "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a",
-                "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"
+                "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28",
+                "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"
             ],
-            "markers": "python_version >= '3.6'",
-            "version": "==8.0.1"
+            "markers": "python_version >= '3.7'",
+            "version": "==8.1.7"
         },
         "mypy-extensions": {
             "hashes": [
-                "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d",
-                "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"
+                "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
+                "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
+            ],
+            "markers": "python_version >= '3.5'",
+            "version": "==1.0.0"
+        },
+        "packaging": {
+            "hashes": [
+                "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5",
+                "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"
             ],
-            "version": "==0.4.3"
+            "markers": "python_version >= '3.7'",
+            "version": "==24.0"
         },
         "pathspec": {
             "hashes": [
-                "sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd",
-                "sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d"
+                "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08",
+                "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"
+            ],
+            "markers": "python_version >= '3.8'",
+            "version": "==0.12.1"
+        },
+        "platformdirs": {
+            "hashes": [
+                "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068",
+                "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"
             ],
-            "version": "==0.8.1"
+            "markers": "python_version >= '3.8'",
+            "version": "==4.2.0"
         },
         "regex": {
             "hashes": [
@@ -379,6 +418,22 @@
             ],
             "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
             "version": "==0.10.2"
+        },
+        "tomli": {
+            "hashes": [
+                "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+                "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
+            ],
+            "markers": "python_version < '3.11'",
+            "version": "==2.0.1"
+        },
+        "typing-extensions": {
+            "hashes": [
+                "sha256:6f1117ac0cbe64536f34520c4688cd144794f9b1d79690bfe0389aa12a347976",
+                "sha256:7427ef26efa5e4e465e3765af0e52d3897e3684c908efe20e3331e1ce51884b3"
+            ],
+            "markers": "python_version < '3.11'",
+            "version": "==4.11.0rc1"
         }
     }
 }

+ 1 - 1
frameworks/Python/django/requirements.txt

@@ -1,4 +1,4 @@
-Django==3.2.24
+Django==3.2.25
 mysqlclient==1.4.6
 psycopg2==2.9.6; implementation_name=='cpython'
 psycopg2cffi==2.9.0; implementation_name=='pypy'

+ 20 - 0
frameworks/Python/mrhttp/README.md

@@ -0,0 +1,20 @@
+# MrHTTP Benchmark Test
+
+This is the MrHTTP portion of a [benchmarking test suite](../../)
+comparing a variety of web development platforms.
+
+The information below is specific to MrHTTP. For further guidance,
+review the [documentation](https://github.com/TechEmpower/FrameworkBenchmarks/wiki).
+Also note that there is additional information provided in
+the [Python README](../).
+
+## Description
+
+[MrHTTP](https://github.com/MarkReedZ/mrhttp) is an asynchronous web framework for Python 3.5+, written in C, that has hit 8.5 million requests per second.
+
+## Test Paths & Sources
+
+All of the test implementations are located within a single file ([app.py](app.py)).
+
+* [JSON Serialization](app.py): "/json"
+* [Plaintext](app.py): "/plaintext"
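+
+## Example Request
+
+A quick smoke test of both endpoints (a sketch; it assumes the container built from [mrhttp.dockerfile](mrhttp.dockerfile) is running and port 8080 is reachable on localhost):
+
+```python
+import json
+import urllib.request
+
+# The JSON test should return the canonical message object.
+with urllib.request.urlopen("http://localhost:8080/json") as res:
+    assert json.loads(res.read()) == {"message": "Hello, world!"}
+
+# The plaintext test should return the raw greeting.
+with urllib.request.urlopen("http://localhost:8080/plaintext") as res:
+    assert res.read() == b"Hello, world!"
+```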

+ 18 - 0
frameworks/Python/mrhttp/app.py

@@ -0,0 +1,18 @@
+
+import multiprocessing
+import mrhttp
+import mrjson as json
+
+app = mrhttp.Application()
+
[email protected]('/json', _type="json")
+def j(r):
+  return json.dumps({'message': 'Hello, world!'})
+
[email protected]('/plaintext', _type="text", options=['cache'])
+def p(r):
+  return "Hello, world!"
+
+
+app.run('0.0.0.0', 8080, cores=multiprocessing.cpu_count())
+

+ 23 - 0
frameworks/Python/mrhttp/benchmark_config.json

@@ -0,0 +1,23 @@
+{
+  "framework": "mrhttp",
+  "tests": [{
+    "default": {
+      "json_url": "/json",
+      "plaintext_url": "/plaintext",
+      "port": 8080,
+      "approach": "Realistic",
+      "classification": "Micro",
+      "framework": "mrhttp",
+      "language": "Python",
+      "flavor": "Python3",
+      "platform": "None",
+      "webserver": "None",
+      "os": "Linux",
+      "orm": "Raw",
+      "database_os": "Linux",
+      "database": "None",
+      "display_name": "MrHTTP",
+      "notes": ""
+    }
+  }]
+}

+ 15 - 0
frameworks/Python/mrhttp/config.toml

@@ -0,0 +1,15 @@
+[framework]
+name = "mrhttp"
+
+[main]
+urls.plaintext = "/plaintext"
+urls.json = "/json"
+approach = "Realistic"
+classification = "Micro"
+database = "None"
+database_os = "Linux"
+os = "Linux"
+orm = "Raw"
+platform = "None"
+webserver = "None"
+versus = "None"

+ 13 - 0
frameworks/Python/mrhttp/mrhttp.dockerfile

@@ -0,0 +1,13 @@
+
+FROM python:3.8.12
+
+ADD ./ /mrhttp
+
+WORKDIR /mrhttp
+
+RUN pip3 install -r /mrhttp/requirements.txt
+
+EXPOSE 8080
+
+CMD python3 app.py
+

+ 6 - 0
frameworks/Python/mrhttp/requirements.txt

@@ -0,0 +1,6 @@
+asyncpg==0.25.0
+mrjson==1.4
+ujson==5.4.0
+mrpacker==1.5
+mrhttp==0.12
+uvloop==0.19.0

+ 3 - 1
frameworks/Ruby/h2o_mruby/h2o_mruby.dockerfile

@@ -4,7 +4,7 @@ ARG H2O_PREFIX=/opt/h2o
 
 FROM "ubuntu:${UBUNTU_VERSION}" AS compile
 
-ARG H2O_VERSION=13ba727ad12dfb2338165d2bcfb2136457e33c8a
+ARG H2O_VERSION=18b175f71ede08b50d3e5ae8303dacef3ea510fc
 
 ARG DEBIAN_FRONTEND=noninteractive
 ARG H2O_PREFIX
@@ -14,6 +14,7 @@ RUN apt-get -yqq update && \
       cmake \
       curl \
       g++ \
+      libbpfcc-dev \
       libbrotli-dev \
       libcap-dev \
       libssl-dev \
@@ -23,6 +24,7 @@ RUN apt-get -yqq update && \
       libz-dev \
       ninja-build \
       pkg-config \
+      rsync \
       ruby \
       systemtap-sdt-dev && \
     curl -LSs "https://github.com/h2o/h2o/archive/${H2O_VERSION}.tar.gz" | \

+ 1 - 1
frameworks/Ruby/rails/Gemfile.lock

@@ -168,7 +168,7 @@ GEM
       thor (~> 1.0, >= 1.2.2)
       zeitwerk (~> 2.6)
     rake (13.1.0)
-    rdoc (6.6.2)
+    rdoc (6.6.3.1)
       psych (>= 4.0.0)
     redis (5.0.7)
       redis-client (>= 0.9.0)

+ 1 - 1
frameworks/Rust/actix/rust-toolchain.toml

@@ -1,2 +1,2 @@
 [toolchain]
-channel = "nightly"
+channel = "stable"

+ 1 - 1
frameworks/Rust/ohkami/.gitignore

@@ -1 +1 @@
-/target
+/target

File diff suppressed because it is too large
+ 235 - 415
frameworks/Rust/ohkami/Cargo.lock


+ 9 - 7
frameworks/Rust/ohkami/Cargo.toml

@@ -1,11 +1,13 @@
 [package]
-name = "ohkami"
-version = "0.1.0"
+name    = "ohkami_framework_benchmarks"
+version = "0.15.0"
 edition = "2021"
+authors = ["kanarus <[email protected]>"]
 
 [dependencies]
-ohkami = { version = "0.3.3", features = ["sqlx", "postgres"] }
-serde = { version = "1.0", features = ["derive"] }
-sqlx = "0.6"
-rand = "0.8.5"
-yarte = "0.15"
+ohkami       = { version = "=0.15.0", features = ["rt_tokio"] }
+tokio        = { version = "1.36.0" , features = ["full"] }
+rand         = { version = "0.8.5"  , features = ["small_rng"] }
+sqlx         = { version = "0.7.3"  , features = ["postgres", "macros", "runtime-tokio-native-tls"] }
+yarte        = { version = "0.15.7" }
+futures-util = { version = "0.3.30" }

+ 35 - 9
frameworks/Rust/ohkami/README.md

@@ -1,15 +1,41 @@
-# [ohkami](https://github.com/kana-rus/ohkami) web framework
+# [ohkami](https://github.com/kana-rus/ohkami) - Intuitive and Declarative Web Framework for Rust
 
 ## Description
-ohkami is **simple** and **macro free** wen framework.
+
+> Build web apps in intuitive and declarative code
+> - *macro-less and type-safe* APIs for intuitive and declarative code
+> - *multi runtime* support: `tokio`, `async-std`
+
+- [User Guide](https://docs.rs/ohkami/latest/ohkami/)
+- [API Documentation](https://docs.rs/ohkami/latest/ohkami/)
+- Cargo package: [ohkami](https://crates.io/crates/ohkami)
 
 
 ## Database
 ## Database
-- PostgreSQL
+
+PostgreSQL with [sqlx](https://github.com/launchbadge/sqlx)
 
 
 ## Test URLs
 ## Test URLs
-- JSON Encoding: [http://localhost:8080/json](http://localhost:8080/json)
-- Single Row Query: [http://localhost:8080/db](http://localhost:8080/db)
-- Multi Row Query: [http://localhost:8080/queries](http://localhost:8080/queries)
-- Fortunes: [http://localhost:8080/fortunes](http://localhost:8080/fortunes)
-- Update Query: [http://localhost:8080/updates](http://localhost:8080/updates)
-- Plaintext: [http://localhost:8080/plaintext](http://localhost:8080/plaintext)
+
+### 1. JSON Serialization
+
+    http://localhost:8000/json
+
+### 2. Single Database Query
+
+    http://localhost:8000/db
+
+### 3. Multiple Database Queries
+
+    http://localhost:8000/queries?q={count}
+
+### 4. Fortunes
+
+    http://localhost:8000/fortunes
+
+### 5. Database Updates
+
+    http://localhost:8000/updates?q={count}
+
+### 6. Plaintext
+
+    http://localhost:8000/plaintext

+ 19 - 17
frameworks/Rust/ohkami/benchmark_config.json

@@ -3,24 +3,26 @@
     "tests": [
     "tests": [
         {
         {
             "default": {
             "default": {
-                "json_url": "/json",
-                "plaintext_url": "/plaintext",
-                "fortune_url": "/fortunes",
-                "db_url": "/db",
-                "query_url": "/queries?q=",
-                "update_url": "/updates?q=",
-                "port": 8080,
-                "approach": "Realistic",
+                "json_url":       "/json",
+                "db_url":         "/db",
+                "query_url":      "/queries?q=",
+                "fortune_url":    "/fortunes",
+                "update_url":     "/updates?q=",
+                "plaintext_url":  "/plaintext",
+                "port":           8000,
+                "approach":       "Realistic",
                 "classification": "Micro",
                 "classification": "Micro",
-                "database": "Postgres",
-                "framework": "ohkami",
-                "language": "Rust",
-                "orm": "Raw",
-                "platform": "Rust",
-                "webserver": "ohkami",
-                "os": "Linux",
-                "database_os": "Linux",
-                "display_name": "ohkami"
+                "database":       "Postgres",
+                "framework":      "ohkami",
+                "language":       "Rust",
+                "orm":            "Raw",
+                "platform":       "None",
+                "webserver":      "ohkami",
+                "os":             "Linux",
+                "database_os":    "Linux",
+                "display_name":   "ohkami",
+                "notes":          "",
+                "versus":         "None"
             }
         }
     ]

+ 19 - 0
frameworks/Rust/ohkami/config.toml

@@ -0,0 +1,19 @@
+[framework]
+name = "ohkami"
+
+[main]
+urls.json      = "/json"
+urls.db        = "/db"
+urls.query     = "/queries?q="
+urls.fortune   = "/fortunes"
+urls.update    = "/updates?q="
+urls.plaintext = "/plaintext"
+approach       = "Realistic"
+classification = "Micro"
+database       = "Postgres"
+database_os    = "Linux"
+os             = "Linux"
+orm            = "Raw"
+platform       = "None"
+webserver      = "ohkami"
+versus         = "None"

+ 15 - 9
frameworks/Rust/ohkami/ohkami.dockerfile

@@ -1,13 +1,19 @@
-FROM rust:1.65
+FROM rust:1.76-slim-buster
+WORKDIR /ohkami_framework_benchmarks
 
-RUN apt update -yqq \
- && apt install -yqq cmake g++
+ENV DATABASE_URL=postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world
+ENV MAX_CONNECTIONS=56
+ENV MIN_CONNECTIONS=56
 
-ADD ./ /ohkami
-WORKDIR /ohkami
+COPY ./src        ./src
+COPY ./templates  ./templates
+COPY ./Cargo.toml ./Cargo.toml
+COPY ./Cargo.lock ./Cargo.lock
 
-RUN cargo clean \
- && RUSTFLAGS="-C target-cpu=native" cargo build --release
+RUN apt update && apt install -y --no-install-recommends \
+    libpq-dev pkg-config libssl-dev && \
+    rm -rf /var/lib/apt/lists/*
 
-EXPOSE 8080
-CMD ./target/release/ohkami
+RUN RUSTFLAGS="-C target-cpu=native" cargo build --release
+EXPOSE 8000
+CMD ./target/release/ohkami_framework_benchmarks

+ 0 - 60
frameworks/Rust/ohkami/src/components.rs

@@ -1,60 +0,0 @@
-pub(crate) mod consts {
-    use std::ops::RangeInclusive;
-
-    pub const RAND_RANGE: RangeInclusive<usize>  = 1..=10000;
-    pub const DB_URL:              &'static str  = "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world?sslmode=disable";
-    pub const MAX_CONNECTIONS:     u32           = 10000;
-}
-
-pub(crate) mod models {
-    use serde::Serialize;
-    use sqlx::FromRow;
-    use yarte::Template;
-
-    #[derive(FromRow, Serialize)]
-    pub struct World {
-        id:           i32,
-        randomnumber: i32,
-    } impl World {
-        pub fn set_randomnumber(&mut self, new_randomnumber: i32) {
-            self.randomnumber = new_randomnumber
-        }
-    }
-
-    #[derive(FromRow, Serialize)]
-    pub struct Fortune {
-        pub id:      i32,
-        pub message: String,
-    }
-    #[derive(Template)]
-    #[template(path = "fortunes.hbs")]
-    pub(crate) struct FortunesTemplate {
-        pub(crate) fortunes: Vec<Fortune>
-    }
-}
-
-pub(crate) mod functions {
-    use ohkami::{prelude::Body, result::{Result, ElseResponseWithErr}, response::Response};
-    use rand::Rng;
-    use yarte::Template;
-    use super::{models::{Fortune, FortunesTemplate}, consts::RAND_RANGE};
-
-    pub fn random_i32() -> i32 {
-        rand::thread_rng().gen_range(RAND_RANGE) as i32
-    }
-    pub fn random_i32s(n: usize) -> std::vec::IntoIter<i32> {
-        let mut generator = rand::thread_rng();
-        let mut i32s = Vec::with_capacity(n);
-        for _ in 0..n {
-            i32s.push(generator.gen_range(RAND_RANGE) as i32)
-        }
-        i32s.into_iter()
-    }
-    pub fn render_html(fortunes: Vec<Fortune>) -> Result<Response> {
-        Response::OK(Body::html(
-            FortunesTemplate {fortunes}
-                .call()
-                ._else(|_| Response::InternalServerError("failed to render template"))?
-        ))
-    }
-}

+ 62 - 85
frameworks/Rust/ohkami/src/main.rs

@@ -1,97 +1,74 @@
-use ohkami::{prelude::*, json};
-use sqlx::postgres::PgPoolOptions;
-mod components; use components::{
-    consts::{DB_URL, MAX_CONNECTIONS},
-    models::{World, Fortune},
-    functions::{random_i32, random_i32s, render_html},
-};
-
-fn main() -> Result<()> {
-    let config = Config {
-        db_profile: DBprofile {
-            pool_options: PgPoolOptions::new().max_connections(MAX_CONNECTIONS),
-            url:          DB_URL,
-        },
-        log_subscribe: None,
-        ..Default::default()
-    };
-
-    Server::setup_with(config)
-        .GET("/json",      || async {Response::OK(json!("message": "Hello, World!"))})
-        .GET("/plaintext", || async {Response::OK("Hello, World!")})
-        .GET("/db",        handle_db)
-        .GET("/fortunes",  handle_fortunes)
-        .GET("/queries",   handle_queries)
-        .GET("/updates",   handle_updates)
-        .serve_on(":8080")
+mod models;
+pub use models::{Fortune, Message, World, WorldsQuery};
+
+mod postgres;
+pub use postgres::Postgres;
+
+mod templates;
+pub use templates::FortunesTemplate;
+
+use ohkami::{Ohkami, Route, Memory};
+
+
+#[tokio::main]
+async fn main() {
+    struct SetServer;
+    impl ohkami::BackFang for SetServer {
+        type Error = std::convert::Infallible;
+        #[inline(always)]
+        async fn bite(&self, res: &mut ohkami::Response, _req: &ohkami::Request) -> Result<(), Self::Error> {
+            res.headers.set().Server("ohkami");
+            Ok(())
+        }
+    }
+
+    Ohkami::with((SetServer, Postgres::init().await), (
+        "/json"     .GET(json_serialization),
+        "/db"       .GET(single_database_query),
+        "/queries"  .GET(multiple_database_query),
+        "/fortunes" .GET(fortunes),
+        "/updates"  .GET(database_updates),
+        "/plaintext".GET(plaintext),
+    )).howl("0.0.0.0:8000").await
+}
+
+async fn json_serialization() -> Message {
+    Message {
+        message: "Hello, World!"
+    }
 }
 
-async fn handle_db(ctx: Context) -> Result<Response> {
-    let id = random_i32();
-    let world = sqlx::query_as::<_, World>(
-        "SELECT id, randomnumber FROM world WHERE id = $1"
-    ).bind(id)
-        .fetch_one(ctx.pool())
-        .await?;
-    Response::OK(json(&world)?)
+async fn single_database_query(p: Memory<'_, Postgres>) -> World {
+    p.select_random_world().await
 }
 
-async fn handle_fortunes(ctx: Context) -> Result<Response> {
-    let mut fortunes = sqlx::query_as::<_, Fortune>(
-        "SELECT id, message FROM fortune"
-    )
-        .fetch_all(ctx.pool())
-        .await?;
+async fn multiple_database_query(q: WorldsQuery<'_>, p: Memory<'_, Postgres>) -> Vec<World> {
+    let n = q.parse();
+    p.select_n_random_worlds(n).await
+}
+
+async fn fortunes(p: Memory<'_, Postgres>) -> FortunesTemplate {
+    let mut fortunes = p.select_all_fortunes().await;
+
     fortunes.push(Fortune {
         id:      0,
-        message: "Additional fortune added at request time.".into(),
+        message: String::from("Additional fortune added at request time."),
     });
-    fortunes.sort_unstable_by(|it, next| it.message.cmp(&next.message));
-    render_html(fortunes)
+
+    fortunes.sort_unstable_by(|a, b| str::cmp(&a.message, &b.message));
+
+    FortunesTemplate { fortunes }
 }
 
-async fn handle_queries(ctx: Context) -> Result<Response> {
-    let count = {
-        let queries = ctx.query::<&str>("q").unwrap_or("1").parse::<usize>().unwrap_or(1);
-        if queries < 1 {1} else if 500 < queries {500} else {queries}
-    };
-    let mut worlds = Vec::with_capacity(count);
-    for id in random_i32s(count) {
-        worlds.push(
-            sqlx::query_as::<_, World>(
-                "SELECT id, randomnumber FROM world WHERE id = $1"
-            ).bind(id)
-                .fetch_one(ctx.pool())
-                .await?
-        )
-    }
-    Response::OK(json(&worlds)?)
+async fn database_updates(q: WorldsQuery<'_>, p: Memory<'_, Postgres>) -> Vec<World> {
+    let n = q.parse();
+    let mut worlds = p.select_n_random_worlds(n).await;
+
+    p.update_random_ids_of_worlds(&mut worlds).await;
+
+    worlds
 }
 
-async fn handle_updates(ctx: Context) -> Result<Response> {
-    let count = {
-        let queries = ctx.query::<&str>("q").unwrap_or("1").parse::<usize>().unwrap_or(1);
-        if queries < 1 {1} else if 500 < queries {500} else {queries}
-    };
-    let mut worlds = Vec::with_capacity(count);
-    let mut new_randomnumbers = random_i32s(count);
-    for id in random_i32s(count) {
-        let mut world = sqlx::query_as::<_, World>(
-            "SELECT id, randomnumber FROM world WHERE id = $1"
-        ).bind(id)
-            .fetch_one(ctx.pool())
-            .await?;
-
-        let new_randomnumber = new_randomnumbers.next().unwrap();
-        world.set_randomnumber(new_randomnumber);
-
-        sqlx::query("UPDATE world SET randomnumber = $1 WHERE id = $2")
-            .bind(new_randomnumber)
-            .bind(id)
-            .execute(ctx.pool())
-            .await?;
-
-        worlds.push(world)
-    }
-    Response::OK(json(&worlds)?)
+async fn plaintext() -> &'static str {
+    "Hello, World!"
 }

+ 36 - 0
frameworks/Rust/ohkami/src/models.rs

@@ -0,0 +1,36 @@
+use ohkami::typed::{ResponseBody, Query};
+
+
+#[ResponseBody(JSONS)]
+pub struct Message {
+    pub message: &'static str,
+}
+
+#[derive(sqlx::FromRow)]
+pub struct Fortune {
+    pub id:      i32,
+    pub message: String,
+}
+
+#[derive(sqlx::FromRow)]
+#[ResponseBody(JSONS)]
+pub struct World {
+    pub id:           i32,
+    #[serde(rename="randomNumber")]
+    pub randomnumber: i32,
+}
+
+#[Query]
+pub struct WorldsQuery<'q> {
+    q: Option<&'q str>,
+}
+impl WorldsQuery<'_> {
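+    /// Clamp `q` to the 1..=500 range required by the TFB rules, defaulting to 1 when absent or unparsable.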
+    #[inline(always)]
+    pub fn parse(self) -> usize {
+        match self.q.unwrap_or("1").parse::<usize>().unwrap_or(1) {
+            n @ 1..=500 => n,
+            0           => 1,
+            501..       => 500,
+        }
+    }
+}

+ 102 - 0
frameworks/Rust/ohkami/src/postgres.rs

@@ -0,0 +1,102 @@
+use futures_util::{stream::FuturesUnordered, TryStreamExt};
+use rand::{rngs::SmallRng, SeedableRng, Rng, thread_rng};
+use crate::models::{World, Fortune};
+
+
+#[derive(Clone)]
+pub struct Postgres(sqlx::PgPool);
+
+impl Postgres {
+    pub async fn init() -> impl ohkami::FrontFang {
+        pub struct UsePostgres(Postgres);
+
+        impl ohkami::FrontFang for UsePostgres {
+            type Error = std::convert::Infallible;
+            #[inline(always)]
+            async fn bite(&self, req: &mut ohkami::Request) -> Result<(), Self::Error> {
+                req.memorize(self.0.clone());
+                Ok(())
+            }
+        }
+
+        macro_rules! load_env {
+            ($($name:ident as $t:ty)*) => {
+                $(
+                    #[allow(non_snake_case)]
+                    let $name = ::std::env::var(stringify!($name))
+                        .expect(concat!(
+                            "Failed to load environment variable ",
+                            "`", stringify!($name), "`"
+                        ))
+                        .parse::<$t>()
+                        .unwrap();
+                )*
+            };
+        } load_env! {
+            MAX_CONNECTIONS as u32
+            MIN_CONNECTIONS as u32
+            DATABASE_URL    as String
+        }
+
+        UsePostgres(Self(
+            sqlx::postgres::PgPoolOptions::new()
+                .max_connections(MAX_CONNECTIONS)
+                .min_connections(MIN_CONNECTIONS)
+                .connect(&DATABASE_URL).await
+                .unwrap()
+        ))
+    }
+}
+
+impl Postgres {
+    pub async fn select_random_world(&self) -> World {
+        let mut rng = SmallRng::from_rng(&mut thread_rng()).unwrap();
+    
+        sqlx::query_as(
+            "SELECT id, randomnumber FROM World WHERE id = $1")
+            .bind((rng.gen::<u32>() % 10_000 + 1) as i32)
+            .fetch_one(&self.0).await
+            .expect("Failed to fetch a world")
+    }
+    
+    pub async fn select_all_fortunes(&self) -> Vec<Fortune> {
+        sqlx::query_as(
+            "SELECT id, message FROM Fortune")
+            .fetch_all(&self.0).await
+            .expect("Failed to fetch fortunes")
+    }
+    
+    pub async fn select_n_random_worlds(&self, n: usize) -> Vec<World> {
+        let mut rng = SmallRng::from_rng(&mut thread_rng()).unwrap();
+    
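+        // Issue the N selects concurrently and collect the rows as the futures complete.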
+        let selects = FuturesUnordered::new();
+        for _ in 0..n {
+            selects.push(
+                sqlx::query_as(
+                    "SELECT id, randomnumber FROM World WHERE id = $1")
+                    .bind((rng.gen::<u32>() % 10_000 + 1) as i32)
+                    .fetch_one(&self.0)
+            )
+        }
+    
+        selects.try_collect().await.expect("Failed to fetch worlds")
+    }
+    
+    pub async fn update_random_ids_of_worlds(&self, worlds: &mut Vec<World>) {
+        let mut rng = SmallRng::from_rng(&mut thread_rng()).unwrap();
+    
+        let updates = FuturesUnordered::new();
+        for w in worlds {
+            w.randomnumber = (rng.gen::<u32>() % 10_000 + 1) as i32;
+            updates.push(
+                sqlx::query(
+                    "UPDATE World SET randomnumber = $1 WHERE id = $2")
+                    .bind(w.randomnumber)
+                    .bind(w.id)
+                    .execute(&self.0)
+            )
+        }
+    
+        let _: sqlx::postgres::PgQueryResult = updates.try_collect().await.expect("Failed to update worlds");
+    }
+}

+ 18 - 0
frameworks/Rust/ohkami/src/templates.rs

@@ -0,0 +1,18 @@
+use ohkami::{Response, IntoResponse};
+use crate::models::Fortune;
+
+
+#[derive(yarte::Template)]
+#[template(path="fortunes")]
+pub struct FortunesTemplate {
+    pub fortunes: Vec<Fortune>,
+}
+impl IntoResponse for FortunesTemplate {
+    #[inline(always)]
+    fn into_response(self) -> Response {
+        ohkami::utils::HTML(
+            <Self as yarte::Template>::call(&self)
+                .expect("Failed to render fortunes template")
+        ).into_response()
+    }
+}

+ 80 - 83
frameworks/Rust/xitca-web/Cargo.lock

@@ -149,12 +149,9 @@ checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
 
 
 [[package]]
 [[package]]
 name = "cc"
 name = "cc"
-version = "1.0.83"
+version = "1.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
-dependencies = [
- "libc",
-]
+checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5"
 
 
 [[package]]
 [[package]]
 name = "cfg-if"
 name = "cfg-if"
@@ -319,9 +316,9 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
 
 
 [[package]]
 [[package]]
 name = "hermit-abi"
 name = "hermit-abi"
-version = "0.3.4"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
 
 
 [[package]]
 [[package]]
 name = "hmac"
 name = "hmac"
@@ -343,9 +340,9 @@ dependencies = [
 
 
 [[package]]
 [[package]]
 name = "http"
 name = "http"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
 dependencies = [
 dependencies = [
  "bytes",
  "bytes",
  "fnv",
  "fnv",
@@ -411,9 +408,9 @@ checksum = "9028f49264629065d057f340a86acb84867925865f73bbf8d47b4d149a7e88b8"
 
 
 [[package]]
 [[package]]
 name = "libc"
 name = "libc"
-version = "0.2.152"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
 
 
 [[package]]
 [[package]]
 name = "lock_api"
 name = "lock_api"
@@ -427,9 +424,9 @@ dependencies = [
 
 
 [[package]]
 [[package]]
 name = "log"
 name = "log"
-version = "0.4.20"
+version = "0.4.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
 
 
 [[package]]
 [[package]]
 name = "matchit"
 name = "matchit"
@@ -461,9 +458,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
 
 
 [[package]]
 [[package]]
 name = "miniz_oxide"
 name = "miniz_oxide"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
 dependencies = [
 dependencies = [
  "adler",
  "adler",
 ]
 ]
@@ -486,9 +483,9 @@ checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
 
 
 [[package]]
 [[package]]
 name = "num-traits"
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
 dependencies = [
 dependencies = [
  "autocfg",
  "autocfg",
 ]
 ]
@@ -549,18 +546,18 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
 
 
 [[package]]
 [[package]]
 name = "pin-project"
 name = "pin-project"
-version = "1.1.3"
+version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422"
+checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
 dependencies = [
 dependencies = [
  "pin-project-internal",
  "pin-project-internal",
 ]
 ]
 
 
 [[package]]
 [[package]]
 name = "pin-project-internal"
 name = "pin-project-internal"
-version = "1.1.3"
+version = "1.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
+checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
 dependencies = [
 dependencies = [
  "proc-macro2",
  "proc-macro2",
  "quote",
  "quote",
@@ -625,9 +622,9 @@ dependencies = [
 
 
 [[package]]
 [[package]]
 name = "proc-macro2"
 name = "proc-macro2"
-version = "1.0.76"
+version = "1.0.78"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
+checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
 dependencies = [
 dependencies = [
  "unicode-ident",
  "unicode-ident",
 ]
 ]
@@ -705,9 +702,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
 
 
 [[package]]
 [[package]]
 name = "ryu"
 name = "ryu"
-version = "1.0.16"
+version = "1.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
+checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
 
 
 [[package]]
 [[package]]
 name = "sailfish"
 name = "sailfish"
@@ -768,18 +765,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 
 [[package]]
 [[package]]
 name = "serde"
 name = "serde"
-version = "1.0.195"
+version = "1.0.197"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
+checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
 dependencies = [
 dependencies = [
  "serde_derive",
  "serde_derive",
 ]
 ]
 
 
 [[package]]
 [[package]]
 name = "serde_derive"
 name = "serde_derive"
-version = "1.0.195"
+version = "1.0.197"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
+checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
 dependencies = [
 dependencies = [
  "proc-macro2",
  "proc-macro2",
  "quote",
  "quote",
@@ -788,9 +785,9 @@ dependencies = [
 
 
 [[package]]
 [[package]]
 name = "serde_json"
 name = "serde_json"
-version = "1.0.111"
+version = "1.0.114"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
+checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
 dependencies = [
 dependencies = [
  "itoa",
  "itoa",
  "ryu",
  "ryu",
@@ -866,12 +863,12 @@ dependencies = [
 
 
 [[package]]
 [[package]]
 name = "socket2"
 name = "socket2"
-version = "0.5.5"
+version = "0.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
+checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871"
 dependencies = [
 dependencies = [
  "libc",
  "libc",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
 ]
 ]
 
 
 [[package]]
 [[package]]
@@ -893,9 +890,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 
 
 [[package]]
 [[package]]
 name = "syn"
 name = "syn"
-version = "2.0.48"
+version = "2.0.52"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
+checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
 dependencies = [
 dependencies = [
  "proc-macro2",
  "proc-macro2",
  "quote",
  "quote",
@@ -925,9 +922,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 
 [[package]]
 name = "tokio"
-version = "1.35.1"
+version = "1.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
+checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
 dependencies = [
  "backtrace",
  "libc",
@@ -935,7 +932,7 @@ dependencies = [
  "num_cpus",
  "pin-project-lite",
  "signal-hook-registry",
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "windows-sys 0.48.0",
 ]
 
@@ -971,9 +968,9 @@ dependencies = [
 
 [[package]]
 name = "tower-http"
-version = "0.5.1"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0da193277a4e2c33e59e09b5861580c33dd0a637c3883d0fa74ba40c0374af2e"
+checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
 dependencies = [
  "bitflags 2.4.2",
  "bytes",
@@ -1037,9 +1034,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
 
 [[package]]
 name = "unicode-normalization"
-version = "0.1.22"
+version = "0.1.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
+checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
 dependencies = [
  "tinyvec",
 ]
@@ -1099,7 +1096,7 @@ version = "0.52.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
 dependencies = [
- "windows-targets 0.52.0",
+ "windows-targets 0.52.4",
 ]
 
 [[package]]
@@ -1119,17 +1116,17 @@ dependencies = [
 
 [[package]]
 name = "windows-targets"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
 dependencies = [
- "windows_aarch64_gnullvm 0.52.0",
- "windows_aarch64_msvc 0.52.0",
- "windows_i686_gnu 0.52.0",
- "windows_i686_msvc 0.52.0",
- "windows_x86_64_gnu 0.52.0",
- "windows_x86_64_gnullvm 0.52.0",
- "windows_x86_64_msvc 0.52.0",
+ "windows_aarch64_gnullvm 0.52.4",
+ "windows_aarch64_msvc 0.52.4",
+ "windows_i686_gnu 0.52.4",
+ "windows_i686_msvc 0.52.4",
+ "windows_x86_64_gnu 0.52.4",
+ "windows_x86_64_gnullvm 0.52.4",
+ "windows_x86_64_msvc 0.52.4",
 ]
 
 [[package]]
@@ -1140,9 +1137,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
 [[package]]
 name = "windows_aarch64_gnullvm"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
 
 [[package]]
 name = "windows_aarch64_msvc"
@@ -1152,9 +1149,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
 name = "windows_aarch64_msvc"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
 
 [[package]]
 name = "windows_i686_gnu"
@@ -1164,9 +1161,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
 [[package]]
 name = "windows_i686_gnu"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
 
 [[package]]
 name = "windows_i686_msvc"
@@ -1176,9 +1173,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
 [[package]]
 name = "windows_i686_msvc"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
 
 [[package]]
 name = "windows_x86_64_gnu"
@@ -1188,9 +1185,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
 name = "windows_x86_64_gnu"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
@@ -1200,9 +1197,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
 
 [[package]]
 name = "windows_x86_64_msvc"
@@ -1212,15 +1209,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
 
 [[package]]
 name = "windows_x86_64_msvc"
-version = "0.52.0"
+version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
 
 [[package]]
 name = "xitca-codegen"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "185ec568d3620ab5129371e5fbfbc8c7b2791f10ed4e0ff216f8784cd896127c"
+checksum = "866906a5f280481ef022ccdec1640730550304bb86b016815d9982fde2f48e3e"
 dependencies = [
  "quote",
  "syn",
@@ -1228,9 +1225,9 @@ dependencies = [
 
 [[package]]
 name = "xitca-http"
-version = "0.2.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23b15032fe8f95903c5d16d49dbf3e336c0fd7b558acc9ca385e992651b522b4"
+checksum = "8b1982607f88759de96dec5b234e9cf835d6a4995e395b65a201e656d0641974"
 dependencies = [
  "futures-core",
  "http",
@@ -1238,7 +1235,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "tokio",
  "tokio-uring",
  "tracing",
@@ -1250,9 +1247,9 @@ dependencies = [
 
 [[package]]
 name = "xitca-io"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e887cc8153538637515e0663704f3492803c5bb48eb7947c80689154d965b7e0"
+checksum = "7581c12bcd8b2194271da2117926a577e4e974ab840c7a3f61cc187a249335c8"
 dependencies = [
  "bytes",
  "tokio",
@@ -1263,7 +1260,7 @@ dependencies = [
 [[package]]
 name = "xitca-postgres"
 version = "0.1.0"
-source = "git+https://github.com/HFQR/xitca-web.git?rev=d79f510197e0f36534fe22b7a467c55dbd683681#d79f510197e0f36534fe22b7a467c55dbd683681"
+source = "git+https://github.com/HFQR/xitca-web.git?rev=13a28c51a2f6e23e4e6b96e508a8af1e5e79d0b6#13a28c51a2f6e23e4e6b96e508a8af1e5e79d0b6"
 dependencies = [
  "fallible-iterator",
  "percent-encoding",
@@ -1287,11 +1284,11 @@ dependencies = [
 
 [[package]]
 name = "xitca-server"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d07160d17cf458adf7f38a2f7bc37ecb15732909683e614bcfe5f6ac8202bda5"
+checksum = "f48656d81909cb34fcf01288cdaf2bd77848d0f0dcbdc0c429b6ed376f78f4d5"
 dependencies = [
- "socket2 0.5.5",
+ "socket2 0.5.6",
  "tokio",
  "tokio-uring",
  "tracing",
@@ -1308,9 +1305,9 @@ checksum = "09a4a38548b14925111dd99560f0a10d1eb9e3e117fa5471c35387ed6f77b58c"
 
 [[package]]
 name = "xitca-unsafe-collection"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c38c5b92c72ba986bb2c2f4fc40ec56e841194773c02278f3c8d4c9733807270"
+checksum = "552a6bf21a5d0dc470644cb3b99f98f44bd414cd6fcca74610465d8196b1d23e"
 dependencies = [
  "bytes",
 ]
@@ -1337,14 +1334,14 @@ dependencies = [
  "xitca-server",
  "xitca-service",
  "xitca-unsafe-collection",
- "xitca-web 0.2.1",
+ "xitca-web 0.4.0",
 ]
 
 [[package]]
 name = "xitca-web"
-version = "0.2.1"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26cb7ab5765524ce47a8c173e16a3184146440e3de3c240ba0d14a617b0f090"
+checksum = "cfae98470b0f55b978ebdccd1317a658590aa6fecec4db61522adaa8e6943747"
 dependencies = [
  "futures-core",
  "http-body",

+ 6 - 6
frameworks/Rust/xitca-web/Cargo.toml

@@ -51,18 +51,18 @@ io-uring = ["xitca-http/io-uring", "xitca-server/io-uring"]
 axum = ["dep:axum", "http-body", "tower", "tower-http", "xitca-web/tower-http-compat" ]
 
 [dependencies]
-xitca-http = "0.2"
-xitca-io = "0.1"
-xitca-server = "0.1"
+xitca-http = "0.4"
+xitca-io = "0.2"
+xitca-server = "0.2"
 xitca-service = "0.1"
-xitca-unsafe-collection = "0.1"
+xitca-unsafe-collection = "0.1.1"
 
 atoi = "2"
 serde = { version = "1" }
 serde_json = { version = "1" }
 
 # web optional
-xitca-web = { version = "0.2", features = ["json"], optional = true }
+xitca-web = { version = "0.4", features = ["json"], optional = true }
 
 # raw-pg optional
 xitca-postgres = { version = "0.1", optional = true }
@@ -92,5 +92,5 @@ codegen-units = 1
 panic = "abort"
 
 [patch.crates-io]
-xitca-postgres = { git = "https://github.com/HFQR/xitca-web.git", rev = "d79f510197e0f36534fe22b7a467c55dbd683681" }
+xitca-postgres = { git = "https://github.com/HFQR/xitca-web.git", rev = "13a28c51a2f6e23e4e6b96e508a8af1e5e79d0b6" }
 mio = { git = "https://github.com/fakeshadow/mio.git", rev = "52b72d372bfe5807755b7f5e3e1edf282954d6ba" }

+ 15 - 26
frameworks/Rust/xitca-web/src/db.rs

@@ -1,6 +1,8 @@
-use std::{collections::HashMap, fmt::Write, future::IntoFuture};
+use std::{collections::HashMap, fmt::Write};
 
-use xitca_postgres::{statement::Statement, AsyncLendingIterator, Postgres};
+use xitca_postgres::{
+    pipeline::Pipeline, statement::Statement, AsyncLendingIterator, SharedClient,
+};
 use xitca_unsafe_collection::no_hash::NoHashBuilder;
 
 use super::{
@@ -9,7 +11,7 @@ use super::{
 };
 
 pub struct Client {
-    client: xitca_postgres::Client,
+    client: SharedClient,
     #[cfg(not(feature = "pg-sync"))]
     rng: std::cell::RefCell<Rand>,
     #[cfg(feature = "pg-sync")]
@@ -19,27 +21,14 @@ pub struct Client {
     updates: HashMap<u16, Statement, NoHashBuilder>,
 }
 
-impl Drop for Client {
-    fn drop(&mut self) {
-        drop(self.fortune.clone().into_guarded(&self.client));
-        drop(self.world.clone().into_guarded(&self.client));
-        for (_, stmt) in std::mem::take(&mut self.updates) {
-            drop(stmt.into_guarded(&self.client))
-        }
-    }
-}
-
 pub async fn create() -> HandleResult<Client> {
-    let (client, driver) = Postgres::new(DB_URL.to_string()).connect().await?;
-
-    tokio::spawn(tokio::task::unconstrained(driver.into_future()));
+    let mut client = SharedClient::new(DB_URL.to_string()).await?;
 
-    let fortune = client.prepare("SELECT * FROM fortune", &[]).await?.leak();
+    let fortune = client.prepare_cached("SELECT * FROM fortune", &[]).await?;
 
     let world = client
-        .prepare("SELECT * FROM world WHERE id=$1", &[])
-        .await?
-        .leak();
+        .prepare_cached("SELECT * FROM world WHERE id=$1", &[])
+        .await?;
 
     let mut updates = HashMap::default();
 
@@ -59,7 +48,7 @@ pub async fn create() -> HandleResult<Client> {
         q.pop();
         q.push(')');
 
-        let st = client.prepare(&q, &[]).await?.leak();
+        let st = client.prepare_cached(&q, &[]).await?;
         updates.insert(num, st);
     }
 
@@ -94,11 +83,11 @@ impl Client {
             .try_next()
             .await?
             .map(|row| World::new(row.get_raw(0), row.get_raw(1)))
-            .ok_or_else(|| format!("World does not exist").into())
+            .ok_or_else(|| "World does not exist".into())
     }
 
     pub async fn get_worlds(&self, num: u16) -> HandleResult<Vec<World>> {
-        let mut pipe = self.client.pipeline();
+        let mut pipe = Pipeline::new();
 
         {
             let mut rng = self.borrow_rng_mut();
@@ -108,7 +97,7 @@ impl Client {
         let mut worlds = Vec::new();
         worlds.reserve(num as usize);
 
-        let mut res = pipe.run().await?;
+        let mut res = self.client.pipeline(pipe).await?;
         while let Some(mut item) = res.try_next().await? {
             while let Some(row) = item.try_next().await? {
                 worlds.push(World::new(row.get_raw(0), row.get_raw(1)))
@@ -124,7 +113,7 @@ impl Client {
         let mut params = Vec::new();
         params.reserve(len * 3);
 
-        let mut pipe = self.client.pipeline();
+        let mut pipe = Pipeline::new();
 
         {
             let mut rng = self.borrow_rng_mut();
@@ -144,7 +133,7 @@ impl Client {
         worlds.reserve(len);
         let mut r_ids = params.into_iter().skip(1).step_by(2);
 
-        let mut res = pipe.run().await?;
+        let mut res = self.client.pipeline(pipe).await?;
         while let Some(mut item) = res.try_next().await? {
             while let Some(row) = item.try_next().await? {
                 let r_id = r_ids.next().unwrap();

+ 4 - 13
frameworks/Rust/xitca-web/src/main_sync.rs

@@ -6,8 +6,9 @@ mod util;
 use serde::Serialize;
 use xitca_web::{
     codegen::route,
-    handler::{html::Html, json::Json, query::Query, state::StateOwn},
+    handler::{html::Html, json::Json, query::Query, state::StateOwn, text::Text},
     http::{header::SERVER, WebResponse},
+    route::get,
     App,
 };
 
@@ -18,8 +19,8 @@ use util::{HandleResult, SERVER_HEADER_VALUE};
 fn main() -> std::io::Result<()> {
     App::new()
         .with_state(db_diesel::create()?)
-        .at_typed(plaintext)
-        .at_typed(json)
+        .at("/plaintext", get(Text("Hello, World!")))
+        .at("/json", get(Json(ser::Message::new())))
         .at_typed(db)
         .at_typed(fortunes)
         .at_typed(queries)
@@ -36,16 +37,6 @@ fn header(mut res: WebResponse) -> WebResponse {
     res
 }
 
-#[route("/plaintext", method = get)]
-fn plaintext() -> &'static str {
-    "Hello, World!"
-}
-
-#[route("/json", method = get)]
-fn json() -> Json<ser::Message> {
-    Json(ser::Message::new())
-}
-
 #[route("/db", method = get)]
 fn db(StateOwn(pool): StateOwn<Pool>) -> HandleResult<Json<impl Serialize>> {
     pool.get_world().map(Json)

+ 12 - 27
frameworks/Rust/xitca-web/src/main_wasm.rs

@@ -1,46 +1,31 @@
 mod ser;
 mod util;
 
-use std::{env, io, net::TcpListener, os::wasi::io::FromRawFd};
-
 use xitca_web::{
-    handler::{handler_service, json::Json},
+    handler::{json::Json, text::Text},
     http::{header::SERVER, WebResponse},
     route::get,
-    service::Service,
-    App, WebContext,
+    App,
 };
 
-fn main() -> io::Result<()> {
-    let fd = env::var("FD_COUNT")
+fn main() -> std::io::Result<()> {
+    let listener = std::env::var("FD_COUNT")
         .ok()
-        .and_then(|var| var.parse().ok())
+        .and_then(|v| v.parse().ok())
+        .map(|fd| unsafe { std::os::wasi::io::FromRawFd::from_raw_fd(fd) })
         .expect("failed to parse FD_COUNT env");
 
-    let listener = unsafe { TcpListener::from_raw_fd(fd) };
-
     App::new()
-        .at(
-            "/json",
-            get(handler_service(|| async { Json(ser::Message::new()) })),
-        )
-        .at(
-            "/plaintext",
-            get(handler_service(|| async { "Hello, World!" })),
-        )
-        .enclosed_fn(middleware_fn)
+        .at("/json", get(Json(ser::Message::new())))
+        .at("/plaintext", get(Text("Hello, World!")))
+        .map(header)
         .serve()
         .listen(listener)?
         .run()
         .wait()
 }
 
-async fn middleware_fn<S, E>(service: &S, ctx: WebContext<'_>) -> Result<WebResponse, E>
-where
-    S: for<'r> Service<WebContext<'r>, Response = WebResponse, Error = E>,
-{
-    service.call(ctx).await.map(|mut res| {
-        res.headers_mut().append(SERVER, util::SERVER_HEADER_VALUE);
-        res
-    })
+fn header(mut res: WebResponse) -> WebResponse {
+    res.headers_mut().append(SERVER, util::SERVER_HEADER_VALUE);
+    res
 }

+ 1 - 0
frameworks/Rust/xitca-web/src/ser.rs

@@ -11,6 +11,7 @@ use xitca_http::{
 
 use crate::util::Error;
 
+#[derive(Clone)]
 pub struct Message {
     message: &'static str,
 }

+ 1 - 1
frameworks/Rust/xitca-web/xitca-web-axum.dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.75
+FROM rust:1.76
 
 ADD ./ /xitca-web
 WORKDIR /xitca-web

+ 1 - 1
frameworks/Rust/xitca-web/xitca-web-iou.dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.75
+FROM rust:1.76
 
 ADD ./ /xitca-web
 WORKDIR /xitca-web

+ 1 - 1
frameworks/Rust/xitca-web/xitca-web-sync.dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.75
+FROM rust:1.76
 
 ADD ./ /xitca-web
 WORKDIR /xitca-web

+ 1 - 1
frameworks/Rust/xitca-web/xitca-web-wasm.dockerfile

@@ -1,7 +1,7 @@
 ARG WASMTIME_VERSION=15.0.0
 ARG WASM_TARGET=wasm32-wasi-preview1-threads
 
-FROM rust:1.75 AS compile
+FROM rust:1.76 AS compile
 
 ARG WASMTIME_VERSION
 ARG WASM_TARGET

+ 1 - 1
frameworks/Rust/xitca-web/xitca-web.dockerfile

@@ -1,4 +1,4 @@
-FROM rust:1.75
+FROM rust:1.76
 
 ADD ./ /xitca-web
 WORKDIR /xitca-web

+ 1 - 1
frameworks/TypeScript/nest/package.json

@@ -32,7 +32,7 @@
     "reflect-metadata": "0.1.13",
     "rimraf": "3.0.2",
     "rxjs": "6.5.4",
-    "typeorm": "0.2.29"
+    "typeorm": "0.3.0"
   },
   "devDependencies": {
     "@nestjs/cli": "7.5.3",

+ 2 - 0
frameworks/Zig/zap/.gitignore

@@ -0,0 +1,2 @@
+zig-cache/
+zig-out/

+ 25 - 0
frameworks/Zig/zap/README.md

@@ -0,0 +1,25 @@
+
+# [Zap](https://github.com/zigzap/zap) - Blazingly fast backends in zig
+
+## Description
+
+Zap is a Zig microframework for building web applications.
+
+## Test URLs
+
+### Test 1: JSON Encoding
+
+    http://localhost:3000/json
+
+### Test 2: Plaintext
+
+    http://localhost:3000/plaintext
+
+### Test 3: Single Row Query
+
+    http://localhost:3000/db
+
+### Test 4: Fortunes (Template rendering)
+
+    http://localhost:3000/fortunes
+
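+## Example
+
+For a quick local smoke test once the server is up (the port is set to 3000
+in `src/main.zig`):
+
+    curl http://localhost:3000/json
+    curl http://localhost:3000/plaintext
+    curl http://localhost:3000/db
+    curl http://localhost:3000/fortunes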

+ 26 - 0
frameworks/Zig/zap/benchmark_config.json

@@ -0,0 +1,26 @@
+{
+  "framework": "zap",
+  "tests": [{
+    "default": {
+      "json_url": "/json",
+      "db_url": "/db",
+      "fortune_url": "/fortunes",
+      "plaintext_url": "/plaintext",
+      "port": 3000,
+      "approach": "Realistic",
+      "classification": "Fullstack",
+      "database": "Postgres",
+      "framework": "Zap",
+      "language": "Zig",
+      "flavor": "None",
+      "orm": "Full",
+      "platform": "None",
+      "webserver": "None",
+      "os": "Linux",
+      "database_os": "Linux",
+      "display_name": "Zap (Zig)",
+      "notes": "",
+      "versus": ""
+    }
+  }]
+}

+ 99 - 0
frameworks/Zig/zap/build.zig

@@ -0,0 +1,99 @@
+const std = @import("std");
+
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
+pub fn build(b: *std.Build) void {
+    // Standard target options allows the person running `zig build` to choose
+    // what target to build for. Here we do not override the defaults, which
+    // means any target is allowed, and the default is native. Other options
+    // for restricting supported target set are available.
+    const target = b.standardTargetOptions(.{});
+
+    // Standard optimization options allow the person running `zig build` to select
+    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+    // set a preferred release mode, allowing the user to decide how to optimize.
+    const optimize = b.standardOptimizeOption(.{});
+
+    const exe = b.addExecutable(.{
+        .name = "zap",
+        // In this case the main source file is merely a path, however, in more
+        // complicated build scripts, this could be a generated file.
+        .root_source_file = .{ .path = "src/main.zig" },
+        .target = target,
+        .optimize = optimize,
+    });
+
+    //exe.addPackagePath("random", "src/random.zig");
+
+    const zap = b.dependency("zap", .{
+        .target = target,
+        .optimize = optimize,
+        .openssl = false, // set to true to enable TLS support
+    });
+    exe.addModule("zap", zap.module("zap"));
+
+    const pg = b.dependency("pg", .{
+        .target = target,
+        .optimize = optimize,
+    });
+    exe.addModule("pg", pg.module("pg"));
+
+    const dig = b.dependency("dig", .{
+        .target = target,
+        .optimize = optimize,
+    });
+    exe.addModule("dns", dig.module("dns"));
+
+    // const mustache = b.dependency("mustache", .{
+    //     .target = target,
+    //     .optimize = optimize,
+    // });
+    // exe.addModule("mustache", mustache.module("mustache"));
+
+    exe.linkLibrary(zap.artifact("facil.io"));
+
+    // This declares intent for the executable to be installed into the
+    // standard location when the user invokes the "install" step (the default
+    // step when running `zig build`).
+    b.installArtifact(exe);
+
+    // This *creates* a Run step in the build graph, to be executed when another
+    // step is evaluated that depends on it. The next line below will establish
+    // such a dependency.
+    const run_cmd = b.addRunArtifact(exe);
+
+    // By making the run step depend on the install step, it will be run from the
+    // installation directory rather than directly from within the cache directory.
+    // This is not necessary, however, if the application depends on other installed
+    // files, this ensures they will be present and in the expected location.
+    run_cmd.step.dependOn(b.getInstallStep());
+
+    // This allows the user to pass arguments to the application in the build
+    // command itself, like this: `zig build run -- arg1 arg2 etc`
+    if (b.args) |args| {
+        run_cmd.addArgs(args);
+    }
+
+    // This creates a build step. It will be visible in the `zig build --help` menu,
+    // and can be selected like this: `zig build run`
+    // This will evaluate the `run` step rather than the default, which is "install".
+    const run_step = b.step("run", "Run the app");
+    run_step.dependOn(&run_cmd.step);
+
+    // Creates a step for unit testing. This only builds the test executable
+    // but does not run it.
+    const unit_tests = b.addTest(.{
+        .root_source_file = .{ .path = "src/main.zig" },
+        .target = target,
+        .optimize = optimize,
+    });
+
+    const run_unit_tests = b.addRunArtifact(unit_tests);
+
+    // Similar to creating the run step earlier, this exposes a `test` step to
+    // the `zig build --help` menu, providing a way for the user to request
+    // running the unit tests.
+    const test_step = b.step("test", "Run unit tests");
+    test_step.dependOn(&run_unit_tests.step);
+}

+ 21 - 0
frameworks/Zig/zap/build.zig.zon

@@ -0,0 +1,21 @@
+.{
+    .name = "Zap testing",
+    .version = "0.1.0",
+
+    .dependencies = .{
+        // zap v0.5.1
+        .zap = .{
+            .url = "https://github.com/zigzap/zap/archive/refs/tags/v0.5.1.tar.gz",
+            .hash = "1220d4802fb09d4e99c0e7265f90d6f3cfdc3e5e31c1b05f0924ee2dd26d9d6dbbf4",
+        },
+        .pg = .{
+            .url = "https://github.com/karlseguin/pg.zig/archive/f3f4a0b3b9996bfb1bf9bd0bdd0d73b36e915a86.tar.gz",
+            .hash = "1220337202642ee66408a35f254549f22cf3a096c6fa6c28e6f87a0161d5a6c0f4ab"
+        },
+        .dig = .{
+            .url = "https://github.com/lun-4/zigdig/archive/2ec407ec3c7f347e747717977958e9ba339eb82f.tar.gz",
+            .hash = "1220dfdb3089dfe9a4e4bc1226fcff08d91d0c0853f287d98d8b81270da251790331"
+        },
+
+    }
+}

+ 3 - 0
frameworks/Zig/zap/run.sh

@@ -0,0 +1,3 @@
+echo "Waiting for ZAP to start..."
+
+zap

+ 334 - 0
frameworks/Zig/zap/src/endpoints.zig

@@ -0,0 +1,334 @@
+const std = @import("std");
+const zap = @import("zap");
+const pg = @import("pg");
+
+const Mustache = @import("zap").Mustache;
+const Thread = std.Thread;
+const Mutex = Thread.Mutex;
+
+const middleware = @import("middleware.zig");
+
+const Message = struct {
+    message: []const u8,
+};
+
+const World = struct {
+    id: i32,
+    randomNumber: i32,
+};
+
+const Fortune = struct {
+    id: i32,
+    message: []const u8,
+};
+
+pub const FortunesEndpoint = struct {
+    ep: zap.Endpoint = undefined,
+    mustache: Mustache,
+    mutex: Mutex,
+
+    const Self = @This();
+
+    pub fn init() Self {
+        const template = "<!DOCTYPE html><html><head><title>Fortunes</title></head><body><table><tr><th>id</th><th>message</th></tr>{{#fortunes}}<tr><td>{{id}}</td><td>{{message}}</td></tr>{{/fortunes}}</table></body></html>";
+        const mustache = Mustache.fromData(template) catch unreachable;
+
+        return .{
+            .ep = zap.Endpoint.init(.{
+                .path = "/fortunes",
+                .get = get,
+            }),
+            .mustache = mustache,
+            .mutex = Mutex{},
+        };
+    }
+
+    pub fn deinit(self: *Self) void {
+        self.mustache.deinit();
+    }
+
+    pub fn endpoint(self: *Self) *zap.Endpoint {
+        return &self.ep;
+    }
+
+    fn compareStrings(_: void, lhs: []const u8, rhs: []const u8) bool {
+        return std.mem.order(u8, lhs, rhs).compare(std.math.CompareOperator.lt);
+    }
+
+    fn cmpFortuneByMessage(_: void, a: Fortune, b: Fortune) bool {
+        return std.mem.order(u8, a.message, b.message).compare(std.math.CompareOperator.lt);
+    }
+
+    fn getFortunes(pool: *pg.Pool) ![]const Fortune {
+        var conn = try pool.acquire();
+        defer conn.release();
+
+        var rows = try conn.query("SELECT id, message FROM Fortune", .{});
+        defer rows.deinit();
+
+        var fortunes = std.ArrayList(Fortune).init(middleware.SharedAllocator.getAllocator());
+        defer fortunes.deinit();
+
+        while (try rows.next()) |row| {
+            var fortune = Fortune{ .id = row.get(i32, 0), .message = row.get([]const u8, 1) };
+            _ = try fortunes.append(fortune);
+        }
+
+        var fortune = Fortune{ .id = 0, .message = "Additional fortune added at request time." };
+        _ = try fortunes.append(fortune);
+
+        var fortunes_slice = try fortunes.toOwnedSlice();
+        std.mem.sort(Fortune, fortunes_slice, {}, cmpFortuneByMessage);
+
+        return fortunes_slice;
+    }
+
+    fn getFortunesHtml(self: *Self, pool: *pg.Pool) ![]const u8 {
+        var fortunes = try getFortunes(pool);
+
+        self.mutex.lock();
+        const ret = self.mustache.build(.{ .fortunes = fortunes });
+        defer ret.deinit();
+        self.mutex.unlock();
+
+        const raw = ret.str().?;
+
+        // std.debug.print("mustache output {s}\n", .{raw});
+
+        var html = try deescapeHtml(raw);
+
+        // std.debug.print("html output {s}\n", .{html});
+
+        return html;
+    }
+
+    pub fn get(ep: *zap.Endpoint, req: zap.Request) void {
+        const self = @fieldParentPtr(Self, "ep", ep);
+
+        if (!checkPath(ep, req)) return;
+
+        req.setHeader("content-type", "text/html; charset=utf-8") catch return;
+
+        var pool: *pg.Pool = undefined;
+
+        const maybe_context: ?*middleware.Context = req.getUserContext(middleware.Context);
+        if (maybe_context) |context| {
+            if (context.pg) |cpg| {
+                pool = cpg.pool;
+            }
+        }
+
+        var fortunes_html = getFortunesHtml(self, pool) catch return;
+
+        req.sendBody(fortunes_html) catch return;
+
+        return;
+    }
+};
+
+pub const DbEndpoint = struct {
+    ep: zap.Endpoint = undefined,
+    mutex: Mutex,
+    const Self = @This();
+
+    pub fn init() Self {
+        return .{
+            .ep = zap.Endpoint.init(.{
+                .path = "/db",
+                .get = get,
+            }),
+            .mutex = Mutex{},
+        };
+    }
+
+    pub fn endpoint(self: *Self) *zap.Endpoint {
+        return &self.ep;
+    }
+
+    pub fn get(ep: *zap.Endpoint, req: zap.Request) void {
+        const self = @fieldParentPtr(Self, "ep", ep);
+
+        if (!checkPath(ep, req)) return;
+
+        req.setContentType(.JSON) catch return;
+
+        var random_number: u32 = 0;
+        var pool: *pg.Pool = undefined;
+
+        const maybe_context: ?*middleware.Context = req.getUserContext(middleware.Context);
+        if (maybe_context) |context| {
+            if (context.prng) |prng| {
+                if (context.pg) |cpg| {
+                    pool = cpg.pool;
+
+                    self.mutex.lock();
+                    random_number = 1 + (prng.rnd.random().uintAtMost(u32, 9999));
+                    self.mutex.unlock();
+                }
+            }
+        }
+
+        // std.debug.print("Attempting to return random: {}\n", .{random_number});
+
+        if (random_number == 0) {
+            return;
+        }
+
+        var conn = pool.acquire() catch return;
+        defer conn.release();
+
+        var row_result = conn.row("SELECT id, randomNumber FROM World WHERE id = $1", .{random_number}) catch |err| {
+            std.debug.print("Error querying database: {}\n", .{err});
+            return;
+        };
+        var row = row_result.?;
+        defer row.deinit();
+
+        var world = World{ .id = row.get(i32, 0), .randomNumber = row.get(i32, 1) };
+
+        var buf: [100]u8 = undefined;
+        var json_to_send: []const u8 = undefined;
+        if (zap.stringifyBuf(&buf, world, .{})) |json_message| {
+            json_to_send = json_message;
+        } else {
+            json_to_send = "null";
+        }
+
+        req.sendBody(json_to_send) catch return;
+
+        return;
+    }
+};
+
+pub const PlaintextEndpoint = struct {
+    ep: zap.Endpoint = undefined,
+    const Self = @This();
+
+    pub fn init() Self {
+        return .{
+            .ep = zap.Endpoint.init(.{
+                .path = "/plaintext",
+                .get = get,
+            }),
+        };
+    }
+
+    pub fn endpoint(self: *Self) *zap.Endpoint {
+        return &self.ep;
+    }
+
+    pub fn get(ep: *zap.Endpoint, req: zap.Request) void {
+        const self = @fieldParentPtr(Self, "ep", ep);
+        _ = self;
+
+        if (!checkPath(ep, req)) return;
+
+        req.setContentType(.TEXT) catch return;
+
+        req.sendBody("Hello, World!") catch return;
+        return;
+    }
+};
+
+pub const JsonEndpoint = struct {
+    ep: zap.Endpoint = undefined,
+    const Self = @This();
+
+    pub fn init() Self {
+        return .{
+            .ep = zap.Endpoint.init(.{
+                .path = "/json",
+                .get = get,
+            }),
+        };
+    }
+
+    pub fn endpoint(self: *Self) *zap.Endpoint {
+        return &self.ep;
+    }
+
+    pub fn get(ep: *zap.Endpoint, req: zap.Request) void {
+        const self = @fieldParentPtr(Self, "ep", ep);
+        _ = self;
+
+        if (!checkPath(ep, req)) return;
+
+        req.setContentType(.JSON) catch return;
+
+        var message = Message{ .message = "Hello, World!" };
+
+        var buf: [100]u8 = undefined;
+        var json_to_send: []const u8 = undefined;
+        if (zap.stringifyBuf(&buf, message, .{})) |json_message| {
+            json_to_send = json_message;
+        } else {
+            json_to_send = "null";
+        }
+
+        req.sendBody(json_to_send) catch return;
+        return;
+    }
+};
+
+fn checkPath(ep: *zap.Endpoint, req: zap.Request) bool {
+    if (!std.mem.eql(u8, ep.settings.path, req.path.?)) {
+        // std.debug.print("Path mismatch: {s} != {s}\n", .{ ep.settings.path, req.path.? });
+
+        return false;
+    }
+
+    // std.debug.print("Path match: {s} == {s}\n", .{ ep.settings.path, req.path.? });
+
+    return true;
+}
+
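+// replaces the 5-byte numeric entities (&#NN;) that the mustache engine emits
+// for characters the benchmark output must contain verbatim; any other byte is
+// copied through unchanged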
+fn deescapeHtml(input: []const u8) ![]const u8 {
+    var output = std.ArrayList(u8).init(middleware.SharedAllocator.getAllocator());
+    defer output.deinit();
+
+    var i: usize = 0;
+    while (i < input.len) {
+        if (std.mem.startsWith(u8, input[i..], "&#32;")) {
+            try output.append(' ');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#34;")) {
+            try output.append('"');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#38;")) {
+            try output.append('&');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#39;")) {
+            try output.append('\'');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#40;")) {
+            try output.append('(');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#41;")) {
+            try output.append(')');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#43;")) {
+            try output.append('+');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#44;")) {
+            try output.append(',');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#46;")) {
+            try output.append('.');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#47;")) {
+            try output.append('/');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#58;")) {
+            try output.append(':');
+            i += 5;
+        } else if (std.mem.startsWith(u8, input[i..], "&#59;")) {
+            try output.append(';');
+            i += 5;
+        } else {
+            try output.append(input[i]);
+            i += 1;
+        }
+    }
+
+    return output.toOwnedSlice();
+}

+ 95 - 0
frameworks/Zig/zap/src/main.zig

@@ -0,0 +1,95 @@
+const std = @import("std");
+const zap = @import("zap");
+const pg = @import("pg");
+const regex = @import("regex");
+const dns = @import("dns");
+const pool = @import("pool.zig");
+
+const endpoints = @import("endpoints.zig");
+const middleware = @import("middleware.zig");
+
+const RndGen = std.rand.DefaultPrng;
+const Allocator = std.mem.Allocator;
+const Pool = pg.Pool;
+
+pub fn main() !void {
+    var gpa = std.heap.GeneralPurposeAllocator(.{
+        .thread_safe = true,
+    }){};
+
+    var tsa = std.heap.ThreadSafeAllocator{
+        .child_allocator = gpa.allocator(),
+    };
+
+    var allocator = tsa.allocator();
+
+    var pg_pool = try pool.initPool(allocator);
+    defer pg_pool.deinit();
+
+    var rnd = std.rand.DefaultPrng.init(blk: {
+        var seed: u64 = undefined;
+        try std.os.getrandom(std.mem.asBytes(&seed));
+        break :blk seed;
+    });
+
+    middleware.SharedAllocator.init(allocator);
+
+    // create the endpoint
+    var dbEndpoint = endpoints.DbEndpoint.init();
+    var plaintextEndpoint = endpoints.PlaintextEndpoint.init();
+    var jsonEndpoint = endpoints.JsonEndpoint.init();
+    var fortunesEndpoint = endpoints.FortunesEndpoint.init();
+
+    // we wrap the endpoint with a middleware handler
+    var jsonEndpointHandler = zap.Middleware.EndpointHandler(middleware.Handler, middleware.Context).init(
+        jsonEndpoint.endpoint(), // the endpoint
+        null, // no other handler (we are the last in the chain)
+        false, // break on finish. See EndpointHandler for this. Not applicable here.
+    );
+
+    var plaintextEndpointHandler = zap.Middleware.EndpointHandler(middleware.Handler, middleware.Context).init(
+        plaintextEndpoint.endpoint(),
+        jsonEndpointHandler.getHandler(),
+        false,
+    );
+
+    var fortunesEndpointHandler = zap.Middleware.EndpointHandler(middleware.Handler, middleware.Context).init(
+        fortunesEndpoint.endpoint(), // the endpoint
+        plaintextEndpointHandler.getHandler(), // next handler in the chain
+        false,
+    );
+
+    var dbEndpointHandler = zap.Middleware.EndpointHandler(middleware.Handler, middleware.Context).init(
+        dbEndpoint.endpoint(), // the endpoint
+        fortunesEndpointHandler.getHandler(), // next handler in the chain
+        false,
+    );
+
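+    // request flow: pg -> prng -> header -> db -> fortunes -> plaintext -> json;
+    // each handler fills in the shared context or responds, then forwards along the chain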
+    var headerHandler = middleware.HeaderMiddleWare.init(dbEndpointHandler.getHandler());
+    var prngHandler = middleware.PrngMiddleWare.init(headerHandler.getHandler(), &rnd);
+    var pgHandler = middleware.PgMiddleWare.init(prngHandler.getHandler(), pg_pool);
+
+    var listener = try zap.Middleware.Listener(middleware.Context).init(
+        .{
+            .on_request = null, // must be null
+            .port = 3000,
+            .log = false,
+            .max_clients = 100000,
+        },
+        pgHandler.getHandler(),
+
+        middleware.SharedAllocator.getAllocator,
+    );
+    try listener.listen();
+
+    const cpuCount = @as(i16, @intCast(std.Thread.getCpuCount() catch 1));
+
+    std.debug.print("Listening on 0.0.0.0:3000 with {d} threads\n", .{16 * cpuCount});
+
+    // start worker threads
+    zap.start(.{
+        .threads = 16 * cpuCount,
+        .workers = 1,
+    });
+}
+

+ 129 - 0
frameworks/Zig/zap/src/middleware.zig

@@ -0,0 +1,129 @@
+const std = @import("std");
+const zap = @import("zap");
+const pg = @import("pg");
+
+// just a way to share our allocator via callback
+pub const SharedAllocator = struct {
+    // static
+    var allocator: std.mem.Allocator = undefined;
+
+    const Self = @This();
+
+    // just a convenience function
+    pub fn init(a: std.mem.Allocator) void {
+        allocator = a;
+    }
+
+    // static function we can pass to the listener later
+    pub fn getAllocator() std.mem.Allocator {
+        return allocator;
+    }
+};
+
+// create a combined context struct
+pub const Context = struct {
+    prng: ?PrngMiddleWare.Prng = null,
+    pg: ?PgMiddleWare.Pg = null,
+};
+
+pub const Handler = zap.Middleware.Handler(Context);
+
+pub const HeaderMiddleWare = struct {
+    handler: Handler,
+
+    const Self = @This();
+
+    pub fn init(other: ?*Handler) Self {
+        return .{
+            .handler = Handler.init(onRequest, other),
+        };
+    }
+
+    // we need the handler as a common interface to chain stuff
+    pub fn getHandler(self: *Self) *Handler {
+        return &self.handler;
+    }
+
+    // note that the first parameter is of type *Handler, not *Self !!!
+    pub fn onRequest(handler: *Handler, req: zap.Request, context: *Context) bool {
+        // this is how we would get our self pointer
+        var self = @fieldParentPtr(Self, "handler", handler);
+        _ = self;
+
+        req.setHeader("Server", "Zap") catch return false;
+
+        // continue in the chain
+        return handler.handleOther(req, context);
+    }
+};
+
+pub const PrngMiddleWare = struct {
+    handler: Handler,
+    rnd: *std.rand.DefaultPrng,
+
+    const Self = @This();
+
+    const Prng = struct {
+        rnd: *std.rand.DefaultPrng = undefined,
+    };
+
+    pub fn init(other: ?*Handler, rnd: *std.rand.DefaultPrng) Self {
+        return .{
+            .handler = Handler.init(onRequest, other),
+            .rnd = rnd,
+        };
+    }
+
+    // we need the handler as a common interface to chain stuff
+    pub fn getHandler(self: *Self) *Handler {
+        return &self.handler;
+    }
+
+    // note that the first parameter is of type *Handler, not *Self !!!
+    pub fn onRequest(handler: *Handler, req: zap.Request, context: *Context) bool {
+
+        // this is how we would get our self pointer
+        var self = @fieldParentPtr(Self, "handler", handler);
+
+        context.prng = Prng{ .rnd = self.rnd };
+
+        // continue in the chain
+        return handler.handleOther(req, context);
+    }
+};
+
+pub const PgMiddleWare = struct {
+    handler: Handler,
+    pool: *pg.Pool,
+
+    const Self = @This();
+
+    const Pg = struct {
+        pool: *pg.Pool = undefined,
+    };
+
+    pub fn init(other: ?*Handler, pool: *pg.Pool) Self {
+        return .{
+            .handler = Handler.init(onRequest, other),
+            .pool = pool,
+        };
+    }
+
+    // we need the handler as a common interface to chain stuff
+    pub fn getHandler(self: *Self) *Handler {
+        return &self.handler;
+    }
+
+    // note that the first parameter is of type *Handler, not *Self !!!
+    pub fn onRequest(handler: *Handler, req: zap.Request, context: *Context) bool {
+
+        // this is how we would get our self pointer
+        var self = @fieldParentPtr(Self, "handler", handler);
+
+        // do our work: fill in the user field of the context
+        context.pg = Pg{ .pool = self.pool };
+
+        // continue in the chain
+        return handler.handleOther(req, context);
+    }
+};

+ 78 - 0
frameworks/Zig/zap/src/pool.zig

@@ -0,0 +1,78 @@
+const std = @import("std");
+const pg = @import("pg");
+const regex = @import("regex");
+const dns = @import("dns");
+
+const Allocator = std.mem.Allocator;
+const Pool = pg.Pool;
+const ArrayList = std.ArrayList;
+const Regex = regex.Regex;
+
+pub fn initPool(allocator: Allocator) !*pg.Pool {
+    const info = try parsePostgresConnStr();
+    std.debug.print("Connection info: {s}:{s}@{s}:{d}/{s}\n", .{ info.username, info.password, info.hostname, info.port, info.database });
+
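+    // resolve the database hostname with zigdig up front; the pool below is
+    // handed the raw IPv4 address rather than a name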
+    const hostname = std.os.getenv("PG_HOST") orelse "localhost";
+    var addresses = try dns.helpers.getAddressList(hostname, allocator);
+    defer addresses.deinit();
+
+    var hostAddress = std.net.Address.parseIp("127.0.0.1", 0) catch unreachable;
+
+    for (addresses.addrs) |address| {
+        hostAddress = address;
+    }
+
+    std.debug.print("tfb hostname {}\n", .{hostAddress.in});
+
+    const host = try addressAsString(hostAddress);
+
+    var pg_pool = try Pool.init(allocator, .{
+        .size = 28,
+        .connect = .{
+            .port = info.port,
+            .host = host,
+        },
+        .auth = .{
+            .username = info.username,
+            .database = info.database,
+            .password = info.password,
+        },
+        .timeout = 10_000,
+    });
+
+    return pg_pool;
+}
+
+pub const ConnectionInfo = struct {
+    username: []const u8,
+    password: []const u8,
+    hostname: []const u8,
+    port: u16,
+    database: []const u8,
+};
+
+fn addressAsString(address: std.net.Address) ![]const u8 {
+    const bytes = @as(*const [4]u8, @ptrCast(&address.in.sa.addr));
+
+    var buffer: [256]u8 = undefined;
+    var source = std.io.StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) };
+    var writer = source.writer();
+
+    try writer.print("{}.{}.{}.{}", .{
+        bytes[0],
+        bytes[1],
+        bytes[2],
+        bytes[3],
+    });
+
+    const output = source.buffer.getWritten();
+
+    return output;
+}
+
+fn parsePostgresConnStr() !ConnectionInfo {
+    return ConnectionInfo{
+        .username = std.os.getenv("PG_USER") orelse "benchmarkdbuser",
+        .password = std.os.getenv("PG_PASS") orelse "benchmarkdbpass",
+        .hostname = std.os.getenv("PG_HOST") orelse "localhost",
+        .port = try std.fmt.parseInt(u16, std.os.getenv("PG_PORT") orelse "5432", 0),
+        .database = std.os.getenv("PG_DB") orelse "hello_world",
+    };
+}

+ 42 - 0
frameworks/Zig/zap/zap.dockerfile

@@ -0,0 +1,42 @@
+#FROM ziglang/static-base:llvm15-aarch64-3 as build
+FROM buddyspencer/ziglang:0.11.0-r3 as build
+
+WORKDIR /zap
+
+COPY src src
+
+COPY build.zig.zon build.zig.zon
+COPY build.zig build.zig
+
+RUN apk update
+RUN apk add yaml-dev sqlite-dev
+RUN apk add bind-tools
+RUN apk add --no-cache bash
+RUN dig +short localhost | head -n 1
+RUN zig build -Doptimize=ReleaseFast --prefix-exe-dir /usr/bin
+RUN zig version
+RUN ls
+
+FROM alpine:3.19
+
+WORKDIR /zap
+
+ENV PG_USER=benchmarkdbuser
+ENV PG_PASS=benchmarkdbpass
+ENV PG_DB=hello_world
+ENV PG_HOST=tfb-database
+ENV PG_PORT=5432
+
+COPY run.sh run.sh
+
+RUN apk update
+
+COPY --from=build /usr/bin/zap /usr/bin/zap
+
+EXPOSE 3000
+
+CMD ["sh", "run.sh"]

+ 5 - 1
toolset/databases/postgres/config.sh

@@ -1 +1,5 @@
-cat /tmp/postgresql.conf >> $PGDATA/postgresql.conf
+#!/bin/bash
+
+set -e
+
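+# executed by the postgres image's /docker-entrypoint-initdb.d hook on first
+# initialization; appends the benchmark's tuned settings to the generated config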
+cat /tmp/postgresql.conf >> "${PGDATA}/postgresql.conf"

+ 0 - 5
toolset/databases/postgres/create-postgres-database.sql

@@ -1,5 +0,0 @@
-CREATE USER benchmarkdbuser WITH PASSWORD 'benchmarkdbpass';
-
-ALTER USER benchmarkdbuser WITH SUPERUSER;
-
-CREATE DATABASE hello_world WITH TEMPLATE = template0 ENCODING 'UTF8';

+ 0 - 100
toolset/databases/postgres/pg_hba.conf

@@ -1,100 +0,0 @@
-# PostgreSQL Client Authentication Configuration File
-# ===================================================
-#
-# Refer to the "Client Authentication" section in the PostgreSQL
-# documentation for a complete description of this file.  A short
-# synopsis follows.
-#
-# This file controls: which hosts are allowed to connect, how clients
-# are authenticated, which PostgreSQL user names they can use, which
-# databases they can access.  Records take one of these forms:
-#
-# local      DATABASE  USER  METHOD  [OPTIONS]
-# host       DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
-# hostssl    DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
-# hostnossl  DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
-#
-# (The uppercase items must be replaced by actual values.)
-#
-# The first field is the connection type: "local" is a Unix-domain
-# socket, "host" is either a plain or SSL-encrypted TCP/IP socket,
-# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a
-# plain TCP/IP socket.
-#
-# DATABASE can be "all", "sameuser", "samerole", "replication", a
-# database name, or a comma-separated list thereof. The "all"
-# keyword does not match "replication". Access to replication
-# must be enabled in a separate record (see example below).
-#
-# USER can be "all", a user name, a group name prefixed with "+", or a
-# comma-separated list thereof.  In both the DATABASE and USER fields
-# you can also write a file name prefixed with "@" to include names
-# from a separate file.
-#
-# ADDRESS specifies the set of hosts the record matches.  It can be a
-# host name, or it is made up of an IP address and a CIDR mask that is
-# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
-# specifies the number of significant bits in the mask.  A host name
-# that starts with a dot (.) matches a suffix of the actual host name.
-# Alternatively, you can write an IP address and netmask in separate
-# columns to specify the set of hosts.  Instead of a CIDR-address, you
-# can write "samehost" to match any of the server's own IP addresses,
-# or "samenet" to match any address in any subnet that the server is
-# directly connected to.
-#
-# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi",
-# "krb5", "ident", "peer", "pam", "ldap", "radius" or "cert".  Note that
-# "password" sends passwords in clear text; "md5" is preferred since
-# it sends encrypted passwords.
-#
-# OPTIONS are a set of options for the authentication in the format
-# NAME=VALUE.  The available options depend on the different
-# authentication methods -- refer to the "Client Authentication"
-# section in the documentation for a list of which options are
-# available for which authentication methods.
-#
-# Database and user names containing spaces, commas, quotes and other
-# special characters must be quoted.  Quoting one of the keywords
-# "all", "sameuser", "samerole" or "replication" makes the name lose
-# its special character, and just match a database or username with
-# that name.
-#
-# This file is read on server startup and when the postmaster receives
-# a SIGHUP signal.  If you edit the file on a running system, you have
-# to SIGHUP the postmaster for the changes to take effect.  You can
-# use "pg_ctl reload" to do that.
-
-# Put your actual configuration here
-# ----------------------------------
-#
-# If you want to allow non-local connections, you need to add more
-# "host" records.  In that case you will also need to make PostgreSQL
-# listen on a non-local interface via the listen_addresses
-# configuration parameter, or via the -i or -h command line switches.
-
-
-
-
-# DO NOT DISABLE!
-# If you change this first entry you will need to make sure that the
-# database superuser can access the database using some other method.
-# Noninteractive access to all databases is required during automatic
-# maintenance (custom daily cronjobs, replication, and similar tasks).
-#
-# Database administrative login by Unix domain socket
-local   all             postgres                                peer
-
-# TYPE  DATABASE        USER            ADDRESS                 METHOD
-
-# "local" is for Unix domain socket connections only
-local   all             all                                     peer
-# IPv4 local connections:
-host    all             all             127.0.0.1/32            md5
-# IPv6 local connections:
-host    all             all             ::1/128                 md5
-# Allow replication connections from localhost, by a user with the
-# replication privilege.
-#local   replication     postgres                                peer
-#host    replication     postgres        127.0.0.1/32            md5
-#host    replication     postgres        ::1/128                 md5
-host	all		all		0.0.0.0/0		md5

+ 8 - 10
toolset/databases/postgres/postgres.dockerfile

@@ -1,16 +1,14 @@
 FROM postgres:16-bookworm
 
-ENV POSTGRES_USER=benchmarkdbuser
-ENV POSTGRES_PASSWORD=benchmarkdbpass
-ENV POSTGRES_DB=hello_world
+ENV PGDATA=/ssd/postgresql \
+    POSTGRES_DB=hello_world \
+    POSTGRES_HOST_AUTH_METHOD=md5 \
+    POSTGRES_INITDB_ARGS=--auth-host=md5 \
+    POSTGRES_PASSWORD=benchmarkdbpass \
+    POSTGRES_USER=benchmarkdbuser
 
-ENV POSTGRES_HOST_AUTH_METHOD=md5
-ENV POSTGRES_INITDB_ARGS=--auth-host=md5
-ENV PGDATA=/ssd/postgresql
+COPY postgresql.conf /tmp/
 
-COPY postgresql-min.conf /tmp/postgresql.conf
-
-COPY create-postgres.sql /docker-entrypoint-initdb.d/
-COPY config.sh /docker-entrypoint-initdb.d/
+COPY config.sh create-postgres.sql /docker-entrypoint-initdb.d/
 
 COPY 60-postgresql-shm.conf /etc/sysctl.d/60-postgresql-shm.conf

+ 0 - 143
toolset/databases/postgres/postgresql-min.conf

@@ -1,143 +0,0 @@
-# See postgresql.conf.sample for a full conf file
-
-listen_addresses = '*'		# what IP address(es) to listen on;
-max_connections = 2000			# (change requires restart)
-
-ssl = false                             # (change requires restart)
-
-# - Memory -
-# values from: http://blog.pgaddict.com/posts/performance-since-postgresql-7-4-to-9-4-pgbench
-# details: http://www.postgresql.org/docs/9.4/static/runtime-config-resource.html
-# http://www.postgresql.org/docs/9.4/static/runtime-config-wal.html
-# http://www.postgresql.org/docs/9.4/static/runtime-config-query.html
-shared_buffers = 256MB                    # min 128kB
-work_mem = 64MB                                # min 64kB
-maintenance_work_mem = 512MB            # min 1MB
-# checkpoint_segments = 64
-checkpoint_completion_target = 0.9
-effective_cache_size = 8GB
-
-# when executed on the SSD (otherwise 4)
-random_page_cost = 2
-
-shared_preload_libraries = 'pg_stat_statements'		# (change requires restart)
-pg_stat_statements.track = all
-pg_stat_statements.max = 500000
-track_activity_query_size = 2048
-
-#------------------------------------------------------------------------------
-# WRITE AHEAD LOG
-#------------------------------------------------------------------------------
-
-# - Settings -
-
-wal_level = minimal			# minimal, archive, or hot_standby
-
-# WARNING: disabling synchronous commit may be dangerous in certain cases.
-# See http://www.postgresql.org/docs/current/static/runtime-config-wal.html
-# for details.
-synchronous_commit = off
-
-#------------------------------------------------------------------------------
-# REPLICATION
-#------------------------------------------------------------------------------
-
-# - Master Server -
-
-# These settings are ignored on a standby server
-
-max_wal_senders = 0		# max number of walsender processes
-				# (change requires restart)
-
-#------------------------------------------------------------------------------
-# ERROR REPORTING AND LOGGING
-#------------------------------------------------------------------------------
-
-log_line_prefix = '%t '			# special values:
-					#   %a = application name
-					#   %u = user name
-					#   %d = database name
-					#   %r = remote host and port
-					#   %h = remote host
-					#   %p = process ID
-					#   %t = timestamp without milliseconds
-					#   %m = timestamp with milliseconds
-					#   %i = command tag
-					#   %e = SQL state
-					#   %c = session ID
-					#   %l = session line number
-					#   %s = session start timestamp
-					#   %v = virtual transaction ID
-					#   %x = transaction ID (0 if none)
-					#   %q = stop here in non-session
-					#        processes
-					#   %% = '%'
-					# e.g. '<%u%%%d> '
-
-#------------------------------------------------------------------------------
-# CLIENT CONNECTION DEFAULTS
-#------------------------------------------------------------------------------
-
-# - Statement Behavior -
-
-#search_path = '"$user",public'		# schema names
-#default_tablespace = ''		# a tablespace name, '' uses the default
-#temp_tablespaces = ''			# a list of tablespace names, '' uses
-					# only default tablespace
-#check_function_bodies = on
-#default_transaction_isolation = 'read committed'
-#default_transaction_read_only = off
-#default_transaction_deferrable = off
-#session_replication_role = 'origin'
-#statement_timeout = 0			# in milliseconds, 0 is disabled
-#vacuum_freeze_min_age = 50000000
-#vacuum_freeze_table_age = 150000000
-#bytea_output = 'hex'			# hex, escape
-#xmlbinary = 'base64'
-#xmloption = 'content'
-
-# - Locale and Formatting -
-
-#datestyle = 'iso, mdy'
-#intervalstyle = 'postgres'
-#timezone = '(defaults to server environment setting)'
-#timezone_abbreviations = 'Default'     # Select the set of available time zone
-					# abbreviations.  Currently, there are
-					#   Default
-					#   Australia
-					#   India
-					# You can create your own file in
-					# share/timezonesets/.
-#extra_float_digits = 0			# min -15, max 3
-#client_encoding = sql_ascii		# actually, defaults to database
-					# encoding
-
-# These settings are initialized by initdb, but they can be changed.
-#lc_messages = 'en_US.UTF-8'			# locale for system error message
-					# strings
-#lc_monetary = 'en_US.UTF-8'			# locale for monetary formatting
-#lc_numeric = 'en_US.UTF-8'			# locale for number formatting
-#lc_time = 'en_US.UTF-8'				# locale for time formatting
-
-# default configuration for text search
-#default_text_search_config = 'pg_catalog.english'
-
-# - Other Defaults -
-
-#dynamic_library_path = '$libdir'
-#local_preload_libraries = ''
-
-
-#------------------------------------------------------------------------------
-# LOCK MANAGEMENT
-#------------------------------------------------------------------------------
-
-#deadlock_timeout = 1s
-#max_locks_per_transaction = 64		# min 10
-					# (change requires restart)
-# Note:  Each lock table slot uses ~270 bytes of shared memory, and there are
-# max_locks_per_transaction * (max_connections + max_prepared_transactions)
-# lock table slots.
-max_pred_locks_per_transaction = 256	# min 10
-					# (change requires restart)
-
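The lock-management note at the end of this deleted file implies a concrete shared-memory cost. A quick worked example with the values used here (max_connections = 2000, stock max_locks_per_transaction = 64):

    # Back-of-the-envelope check of the "~270 bytes per lock table slot"
    # comment above; numbers are taken from this config file, defaults
    # are assumed for the settings it leaves unset.
    max_connections = 2000
    max_prepared_transactions = 0          # PostgreSQL default
    max_locks_per_transaction = 64         # PostgreSQL default
    bytes_per_slot = 270                   # per the comment above

    slots = max_locks_per_transaction * (max_connections + max_prepared_transactions)
    print(f"{slots} slots ~= {slots * bytes_per_slot / 2**20:.1f} MiB shared memory")
    # -> 128000 slots ~= 33.0 MiB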

+ 8 - 441
toolset/databases/postgres/postgresql.conf

@@ -1,108 +1,10 @@
-# -----------------------------
-# PostgreSQL configuration file
-# -----------------------------
-#
-# This file consists of lines of the form:
-#
-#   name = value
-#
-# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
-# "#" anywhere on a line.  The complete list of parameter names and allowed
-# values can be found in the PostgreSQL documentation.
-#
-# The commented-out settings shown in this file represent the default values.
-# Re-commenting a setting is NOT sufficient to revert it to the default value;
-# you need to reload the server.
-#
-# This file is read on server startup and when the server receives a SIGHUP
-# signal.  If you edit the file on a running system, you have to SIGHUP the
-# server for the changes to take effect, or use "pg_ctl reload".  Some
-# parameters, which are marked below, require a server shutdown and restart to
-# take effect.
-#
-# Any parameter can also be given as a command-line option to the server, e.g.,
-# "postgres -c log_connections=on".  Some parameters can be changed at run time
-# with the "SET" SQL command.
-#
-# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
-#                MB = megabytes                     s   = seconds
-#                GB = gigabytes                     min = minutes
-#                                                   h   = hours
-#                                                   d   = days
-
-
-#------------------------------------------------------------------------------
-# FILE LOCATIONS
-#------------------------------------------------------------------------------
-
-# The default values of these variables are driven from the -D command-line
-# option or PGDATA environment variable, represented here as ConfigDir.
-
-data_directory = '/ssd/postgresql'		# use data in another directory
-					# (change requires restart)
-hba_file = '/etc/postgresql/PG_VERSION/main/pg_hba.conf'	# host-based authentication file
-					# (change requires restart)
-ident_file = '/etc/postgresql/PG_VERSION/main/pg_ident.conf'	# ident configuration file
-					# (change requires restart)
-
-# If external_pid_file is not explicitly set, no extra PID file is written.
-external_pid_file = '/var/run/postgresql/PG_VERSION-main.pid'		# write an extra PID file
-					# (change requires restart)
-
-
-#------------------------------------------------------------------------------
-# CONNECTIONS AND AUTHENTICATION
-#------------------------------------------------------------------------------
-
-# - Connection Settings -
+# For a full conf file see:
+# https://github.com/TechEmpower/FrameworkBenchmarks/blob/d8f043d183d1ccbba41157bd57314ef61059edb8/toolset/databases/postgres/postgresql.conf.sample

 listen_addresses = '*'		# what IP address(es) to listen on;
-					# comma-separated list of addresses;
-					# defaults to 'localhost', '*' = all
-					# (change requires restart)
-port = 5432				# (change requires restart)
 max_connections = 2000			# (change requires restart)
-# Note:  Increasing max_connections costs ~400 bytes of shared memory per
-# connection slot, plus lock space (see max_locks_per_transaction).
-#superuser_reserved_connections = 3	# (change requires restart)
-unix_socket_directories = '/var/run/postgresql'		# (change requires restart)
-#unix_socket_group = ''			# (change requires restart)
-#unix_socket_permissions = 0777		# begin with 0 to use octal notation
-					# (change requires restart)
-#bonjour = off				# advertise server via Bonjour
-					# (change requires restart)
-#bonjour_name = ''			# defaults to the computer name
-					# (change requires restart)
-
-# - Security and Authentication -

-#authentication_timeout = 1min		# 1s-600s
 ssl = false                             # (change requires restart)
-#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH'	# allowed SSL ciphers
-					# (change requires restart)
-#ssl_renegotiation_limit = 512MB	# amount of data between renegotiations
-password_encryption = md5
-#db_user_namespace = off
-
-# Kerberos and GSSAPI
-#krb_server_keyfile = ''
-#krb_srvname = 'postgres'		# (Kerberos only)
-#krb_caseins_users = off
-
-# - TCP Keepalives -
-# see "man 7 tcp" for details
-
-#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
-					# 0 selects the system default
-#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
-					# 0 selects the system default
-#tcp_keepalives_count = 0		# TCP_KEEPCNT;
-					# 0 selects the system default
-
-
-#------------------------------------------------------------------------------
-# RESOURCE USAGE (except WAL)
-#------------------------------------------------------------------------------

 # - Memory -
 # values from: http://blog.pgaddict.com/posts/performance-since-postgresql-7-4-to-9-4-pgbench
@@ -119,47 +21,11 @@ effective_cache_size = 8GB
 # when executed on the SSD (otherwise 4)
 random_page_cost = 2

-#shared_buffers = 32MB			# min 128kB
-					# (change requires restart)
-#temp_buffers = 8MB			# min 800kB
-#max_prepared_transactions = 0		# zero disables the feature
-					# (change requires restart)
-# Note:  Increasing max_prepared_transactions costs ~600 bytes of shared memory
-# per transaction slot, plus lock space (see max_locks_per_transaction).
-# It is not advisable to set max_prepared_transactions nonzero unless you
-# actively intend to use prepared transactions.
-#work_mem = 1MB				# min 64kB
-#maintenance_work_mem = 16MB		# min 1MB
-#max_stack_depth = 2MB			# min 100kB
-
-# - Kernel Resource Usage -
-
-#max_files_per_process = 1000		# min 25
-					# (change requires restart)
 shared_preload_libraries = 'pg_stat_statements'		# (change requires restart)
 pg_stat_statements.track = all
 pg_stat_statements.max = 500000
 track_activity_query_size = 2048

-# - Cost-Based Vacuum Delay -
-
-#vacuum_cost_delay = 0ms		# 0-100 milliseconds
-#vacuum_cost_page_hit = 1		# 0-10000 credits
-#vacuum_cost_page_miss = 10		# 0-10000 credits
-#vacuum_cost_page_dirty = 20		# 0-10000 credits
-#vacuum_cost_limit = 200		# 1-10000 credits
-
-# - Background Writer -
-
-#bgwriter_delay = 200ms			# 10-10000ms between rounds
-#bgwriter_lru_maxpages = 100		# 0-1000 max buffers written/round
-#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
-
-# - Asynchronous Behavior -
-
-#effective_io_concurrency = 1		# 1-1000. 0 disables prefetching
-
-
 #------------------------------------------------------------------------------
 # WRITE AHEAD LOG
 #------------------------------------------------------------------------------
@@ -167,45 +33,12 @@ track_activity_query_size = 2048
 # - Settings -

 wal_level = minimal			# minimal, archive, or hot_standby
-					# (change requires restart)
-#fsync = on				# turns forced synchronization on or off

 # WARNING: disabling synchronous commit may be dangerous in certain cases.
 # See http://www.postgresql.org/docs/current/static/runtime-config-wal.html
 # for details.
 synchronous_commit = off

-#wal_sync_method = fsync		# the default is the first option
-					# supported by the operating system:
-					#   open_datasync
-					#   fdatasync (default on Linux)
-					#   fsync
-					#   fsync_writethrough
-					#   open_sync
-#full_page_writes = on			# recover from partial page writes
-#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
-					# (change requires restart)
-#wal_writer_delay = 200ms		# 1-10000 milliseconds
-
-#commit_delay = 0			# range 0-100000, in microseconds
-#commit_siblings = 5			# range 1-1000
-
-# - Checkpoints -
-
-#checkpoint_segments = 3		# in logfile segments, min 1, 16MB each
-#checkpoint_timeout = 5min		# range 30s-1h
-#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
-#checkpoint_warning = 30s		# 0 disables
-
-# - Archiving -
-
-#archive_mode = off		# allows archiving to be done
-				# (change requires restart)
-#archive_command = ''		# command to use to archive a logfile segment
-#archive_timeout = 0		# force a logfile segment switch after this
-				# number of seconds; 0 disables
-
-
 #------------------------------------------------------------------------------
 # REPLICATION
 #------------------------------------------------------------------------------
@@ -216,185 +49,11 @@ synchronous_commit = off

 max_wal_senders = 0		# max number of walsender processes
 				# (change requires restart)
-#wal_sender_delay = 1s		# walsender cycle time, 1-10000 milliseconds
-#wal_keep_segments = 0		# in logfile segments, 16MB each; 0 disables
-#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
-#replication_timeout = 60s	# in milliseconds; 0 disables
-#synchronous_standby_names = ''	# standby servers that provide sync rep
-				# comma-separated list of application_name
-				# from standby(s); '*' = all
-
-# - Standby Servers -
-
-# These settings are ignored on a master server
-
-#hot_standby = off			# "on" allows queries during recovery
-					# (change requires restart)
-#max_standby_archive_delay = 30s	# max delay before canceling queries
-					# when reading WAL from archive;
-					# -1 allows indefinite delay
-#max_standby_streaming_delay = 30s	# max delay before canceling queries
-					# when reading streaming WAL;
-					# -1 allows indefinite delay
-#wal_receiver_status_interval = 10s	# send replies at least this often
-					# 0 disables
-#hot_standby_feedback = off		# send info from standby to prevent
-					# query conflicts
-
-
-#------------------------------------------------------------------------------
-# QUERY TUNING
-#------------------------------------------------------------------------------
-
-# - Planner Method Configuration -
-
-#enable_bitmapscan = on
-#enable_hashagg = on
-#enable_hashjoin = on
-#enable_indexscan = on
-#enable_material = on
-#enable_mergejoin = on
-#enable_nestloop = on
-#enable_seqscan = on
-#enable_sort = on
-#enable_tidscan = on
-
-# - Planner Cost Constants -
-
-#seq_page_cost = 1.0			# measured on an arbitrary scale
-#random_page_cost = 4.0			# same scale as above
-#cpu_tuple_cost = 0.01			# same scale as above
-#cpu_index_tuple_cost = 0.005		# same scale as above
-#cpu_operator_cost = 0.0025		# same scale as above
-#effective_cache_size = 128MB
-
-# - Genetic Query Optimizer -
-
-#geqo = on
-#geqo_threshold = 12
-#geqo_effort = 5			# range 1-10
-#geqo_pool_size = 0			# selects default based on effort
-#geqo_generations = 0			# selects default based on effort
-#geqo_selection_bias = 2.0		# range 1.5-2.0
-#geqo_seed = 0.0			# range 0.0-1.0
-
-# - Other Planner Options -
-
-#default_statistics_target = 100	# range 1-10000
-#constraint_exclusion = partition	# on, off, or partition
-#cursor_tuple_fraction = 0.1		# range 0.0-1.0
-#from_collapse_limit = 8
-#join_collapse_limit = 8		# 1 disables collapsing of explicit
-					# JOIN clauses
-

 #------------------------------------------------------------------------------
 # ERROR REPORTING AND LOGGING
 #------------------------------------------------------------------------------

-# - Where to Log -
-
-#log_destination = 'stderr'		# Valid values are combinations of
-					# stderr, csvlog, syslog, and eventlog,
-					# depending on platform.  csvlog
-					# requires logging_collector to be on.
-
-# This is used when logging to stderr:
-#logging_collector = off		# Enable capturing of stderr and csvlog
-					# into log files. Required to be on for
-					# csvlogs.
-					# (change requires restart)
-
-# These are only used if logging_collector is on:
-#log_directory = 'pg_log'		# directory where log files are written,
-					# can be absolute or relative to PGDATA
-#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
-					# can include strftime() escapes
-#log_file_mode = 0600			# creation mode for log files,
-					# begin with 0 to use octal notation
-#log_truncate_on_rotation = off		# If on, an existing log file with the
-					# same name as the new log file will be
-					# truncated rather than appended to.
-					# But such truncation only occurs on
-					# time-driven rotation, not on restarts
-					# or size-driven rotation.  Default is
-					# off, meaning append to existing files
-					# in all cases.
-#log_rotation_age = 1d			# Automatic rotation of logfiles will
-					# happen after that time.  0 disables.
-#log_rotation_size = 10MB		# Automatic rotation of logfiles will
-					# happen after that much log output.
-					# 0 disables.
-
-# These are relevant when logging to syslog:
-#syslog_facility = 'LOCAL0'
-#syslog_ident = 'postgres'
-
-#silent_mode = off			# Run server silently.
-					# DO NOT USE without syslog or
-					# logging_collector
-					# (change requires restart)
-
-
-# - When to Log -
-
-#client_min_messages = notice		# values in order of decreasing detail:
-					#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-					#   log
-					#   notice
-					#   warning
-					#   error
-
-#log_min_messages = warning		# values in order of decreasing detail:
-					#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-					#   info
-					#   notice
-					#   warning
-					#   error
-					#   log
-					#   fatal
-					#   panic
-
-#log_min_error_statement = error	# values in order of decreasing detail:
-				 	#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-				 	#   info
-					#   notice
-					#   warning
-					#   error
-					#   log
-					#   fatal
-					#   panic (effectively off)
-
-#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
-					# and their durations, > 0 logs only
-					# statements running at least this number
-					# of milliseconds
-
-
-# - What to Log -
-
-#debug_print_parse = off
-#debug_print_rewritten = off
-#debug_print_plan = off
-#debug_pretty_print = on
-#log_checkpoints = off
-#log_connections = off
-#log_disconnections = off
-#log_duration = off
-#log_error_verbosity = default		# terse, default, or verbose messages
-#log_hostname = off
 log_line_prefix = '%t '			# special values:
 					#   %a = application name
 					#   %u = user name
@@ -415,64 +74,6 @@ log_line_prefix = '%t '			# special values:
 					#        processes
 					#   %% = '%'
 					# e.g. '<%u%%%d> '
-#log_lock_waits = off			# log lock waits >= deadlock_timeout
-#log_statement = 'none'			# none, ddl, mod, all
-#log_temp_files = -1			# log temporary files equal or larger
-					# than the specified size in kilobytes;
-					# -1 disables, 0 logs all temp files
-#log_timezone = '(defaults to server environment setting)'
-
-
-#------------------------------------------------------------------------------
-# RUNTIME STATISTICS
-#------------------------------------------------------------------------------
-
-# - Query/Index Statistics Collector -
-
-#track_activities = on
-#track_counts = on
-#track_functions = none			# none, pl, all
-#track_activity_query_size = 1024 	# (change requires restart)
-#update_process_title = on
-#stats_temp_directory = 'pg_stat_tmp'
-
-
-# - Statistics Monitoring -
-
-#log_parser_stats = off
-#log_planner_stats = off
-#log_executor_stats = off
-#log_statement_stats = off
-
-
-#------------------------------------------------------------------------------
-# AUTOVACUUM PARAMETERS
-#------------------------------------------------------------------------------
-
-#autovacuum = on			# Enable autovacuum subprocess?  'on'
-					# requires track_counts to also be on.
-#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
-					# their durations, > 0 logs only
-					# actions running at least this number
-					# of milliseconds.
-#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
-					# (change requires restart)
-#autovacuum_naptime = 1min		# time between autovacuum runs
-#autovacuum_vacuum_threshold = 50	# min number of row updates before
-					# vacuum
-#autovacuum_analyze_threshold = 50	# min number of row updates before
-					# analyze
-#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
-#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
-#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
-					# (change requires restart)
-#autovacuum_vacuum_cost_delay = 20ms	# default vacuum cost delay for
-					# autovacuum, in milliseconds;
-					# -1 means use vacuum_cost_delay
-#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
-					# autovacuum, -1 means use
-					# vacuum_cost_limit
-

 #------------------------------------------------------------------------------
 # CLIENT CONNECTION DEFAULTS
@@ -498,7 +99,7 @@ log_line_prefix = '%t '			# special values:

 # - Locale and Formatting -

-datestyle = 'iso, mdy'
+#datestyle = 'iso, mdy'
 #intervalstyle = 'postgres'
 #timezone = '(defaults to server environment setting)'
 #timezone_abbreviations = 'Default'     # Select the set of available time zone
@@ -513,14 +114,14 @@ datestyle = 'iso, mdy'
 					# encoding

 # These settings are initialized by initdb, but they can be changed.
-lc_messages = 'en_US.UTF-8'			# locale for system error message
+#lc_messages = 'en_US.UTF-8'			# locale for system error message
 					# strings
-lc_monetary = 'en_US.UTF-8'			# locale for monetary formatting
-lc_numeric = 'en_US.UTF-8'			# locale for number formatting
-lc_time = 'en_US.UTF-8'				# locale for time formatting
+#lc_monetary = 'en_US.UTF-8'			# locale for monetary formatting
+#lc_numeric = 'en_US.UTF-8'			# locale for number formatting
+#lc_time = 'en_US.UTF-8'				# locale for time formatting

 # default configuration for text search
-default_text_search_config = 'pg_catalog.english'
+#default_text_search_config = 'pg_catalog.english'

 # - Other Defaults -

@@ -541,37 +142,3 @@ default_text_search_config = 'pg_catalog.english'
 max_pred_locks_per_transaction = 256	# min 10
 					# (change requires restart)

-#------------------------------------------------------------------------------
-# VERSION/PLATFORM COMPATIBILITY
-#------------------------------------------------------------------------------
-
-# - Previous PostgreSQL Versions -
-
-#array_nulls = on
-#backslash_quote = safe_encoding	# on, off, or safe_encoding
-#default_with_oids = off
-#escape_string_warning = on
-#lo_compat_privileges = off
-#quote_all_identifiers = off
-#sql_inheritance = on
-#standard_conforming_strings = on
-#synchronize_seqscans = on
-
-# - Other Platforms and Clients -
-
-#transform_null_equals = off
-
-
-#------------------------------------------------------------------------------
-# ERROR HANDLING
-#------------------------------------------------------------------------------
-
-#exit_on_error = off				# terminate session on any error?
-#restart_after_crash = on			# reinitialize after backend crash?
-
-
-#------------------------------------------------------------------------------
-# CUSTOMIZED OPTIONS
-#------------------------------------------------------------------------------
-
-#custom_variable_classes = ''		# list of custom variable class names
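With datestyle, the lc_* settings, and default_text_search_config commented out above, the server now falls back to whatever initdb picked for the cluster rather than hard-coded en_US.UTF-8 values. A small sketch to confirm the effective settings on a running instance (connection details are assumptions, as above):

    # Sketch only: report the effective values of the settings this commit
    # stopped pinning in postgresql.conf.
    import psycopg2

    conn = psycopg2.connect(host="localhost", dbname="hello_world",
                            user="benchmarkdbuser", password="benchmarkdbpass")
    with conn.cursor() as cur:
        for guc in ("datestyle", "lc_monetary", "lc_numeric", "lc_time",
                    "default_text_search_config", "max_connections"):
            cur.execute(f"SHOW {guc}")
            print(guc, "=", cur.fetchone()[0])
    conn.close()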

+ 0 - 813
toolset/databases/postgres/postgresql.conf.sample

@@ -1,813 +0,0 @@
-# -----------------------------
-# PostgreSQL configuration file
-# -----------------------------
-#
-# This file consists of lines of the form:
-#
-#   name = value
-#
-# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
-# "#" anywhere on a line.  The complete list of parameter names and allowed
-# values can be found in the PostgreSQL documentation.
-#
-# The commented-out settings shown in this file represent the default values.
-# Re-commenting a setting is NOT sufficient to revert it to the default value;
-# you need to reload the server.
-#
-# This file is read on server startup and when the server receives a SIGHUP
-# signal.  If you edit the file on a running system, you have to SIGHUP the
-# server for the changes to take effect, run "pg_ctl reload", or execute
-# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
-# require a server shutdown and restart to take effect.
-#
-# Any parameter can also be given as a command-line option to the server, e.g.,
-# "postgres -c log_connections=on".  Some parameters can be changed at run time
-# with the "SET" SQL command.
-#
-# Memory units:  B  = bytes            Time units:  us  = microseconds
-#                kB = kilobytes                     ms  = milliseconds
-#                MB = megabytes                     s   = seconds
-#                GB = gigabytes                     min = minutes
-#                TB = terabytes                     h   = hours
-#                                                   d   = days
-
-
-#------------------------------------------------------------------------------
-# FILE LOCATIONS
-#------------------------------------------------------------------------------
-
-# The default values of these variables are driven from the -D command-line
-# option or PGDATA environment variable, represented here as ConfigDir.
-
-#data_directory = 'ConfigDir'		# use data in another directory
-					# (change requires restart)
-#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
-					# (change requires restart)
-#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
-					# (change requires restart)
-
-# If external_pid_file is not explicitly set, no extra PID file is written.
-#external_pid_file = ''			# write an extra PID file
-					# (change requires restart)
-
-
-#------------------------------------------------------------------------------
-# CONNECTIONS AND AUTHENTICATION
-#------------------------------------------------------------------------------
-
-# - Connection Settings -
-
-#listen_addresses = 'localhost'		# what IP address(es) to listen on;
-					# comma-separated list of addresses;
-					# defaults to 'localhost'; use '*' for all
-					# (change requires restart)
-#port = 5432				# (change requires restart)
-#max_connections = 100			# (change requires restart)
-#reserved_connections = 0		# (change requires restart)
-#superuser_reserved_connections = 3	# (change requires restart)
-#unix_socket_directories = '/tmp'	# comma-separated list of directories
-					# (change requires restart)
-#unix_socket_group = ''			# (change requires restart)
-#unix_socket_permissions = 0777		# begin with 0 to use octal notation
-					# (change requires restart)
-#bonjour = off				# advertise server via Bonjour
-					# (change requires restart)
-#bonjour_name = ''			# defaults to the computer name
-					# (change requires restart)
-
-# - TCP settings -
-# see "man tcp" for details
-
-#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
-					# 0 selects the system default
-#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
-					# 0 selects the system default
-#tcp_keepalives_count = 0		# TCP_KEEPCNT;
-					# 0 selects the system default
-#tcp_user_timeout = 0			# TCP_USER_TIMEOUT, in milliseconds;
-					# 0 selects the system default
-
-#client_connection_check_interval = 0	# time between checks for client
-					# disconnection while running queries;
-					# 0 for never
-
-# - Authentication -
-
-#authentication_timeout = 1min		# 1s-600s
-#password_encryption = scram-sha-256	# scram-sha-256 or md5
-#db_user_namespace = off
-
-# GSSAPI using Kerberos
-#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
-#krb_caseins_users = off
-
-# - SSL -
-
-#ssl = off
-#ssl_ca_file = ''
-#ssl_cert_file = 'server.crt'
-#ssl_crl_file = ''
-#ssl_crl_dir = ''
-#ssl_key_file = 'server.key'
-#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
-#ssl_prefer_server_ciphers = on
-#ssl_ecdh_curve = 'prime256v1'
-#ssl_min_protocol_version = 'TLSv1.2'
-#ssl_max_protocol_version = ''
-#ssl_dh_params_file = ''
-#ssl_passphrase_command = ''
-#ssl_passphrase_command_supports_reload = off
-
-
-#------------------------------------------------------------------------------
-# RESOURCE USAGE (except WAL)
-#------------------------------------------------------------------------------
-
-# - Memory -
-
-#shared_buffers = 128MB			# min 128kB
-					# (change requires restart)
-#huge_pages = try			# on, off, or try
-					# (change requires restart)
-#huge_page_size = 0			# zero for system default
-					# (change requires restart)
-#temp_buffers = 8MB			# min 800kB
-#max_prepared_transactions = 0		# zero disables the feature
-					# (change requires restart)
-# Caution: it is not advisable to set max_prepared_transactions nonzero unless
-# you actively intend to use prepared transactions.
-#work_mem = 4MB				# min 64kB
-#hash_mem_multiplier = 2.0		# 1-1000.0 multiplier on hash table work_mem
-#maintenance_work_mem = 64MB		# min 1MB
-#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
-#logical_decoding_work_mem = 64MB	# min 64kB
-#max_stack_depth = 2MB			# min 100kB
-#shared_memory_type = mmap		# the default is the first option
-					# supported by the operating system:
-					#   mmap
-					#   sysv
-					#   windows
-					# (change requires restart)
-#dynamic_shared_memory_type = posix	# the default is usually the first option
-					# supported by the operating system:
-					#   posix
-					#   sysv
-					#   windows
-					#   mmap
-					# (change requires restart)
-#min_dynamic_shared_memory = 0MB	# (change requires restart)
-
-# - Disk -
-
-#temp_file_limit = -1			# limits per-process temp file space
-					# in kilobytes, or -1 for no limit
-
-# - Kernel Resources -
-
-#max_files_per_process = 1000		# min 64
-					# (change requires restart)
-
-# - Cost-Based Vacuum Delay -
-
-#vacuum_cost_delay = 0			# 0-100 milliseconds (0 disables)
-#vacuum_cost_page_hit = 1		# 0-10000 credits
-#vacuum_cost_page_miss = 2		# 0-10000 credits
-#vacuum_cost_page_dirty = 20		# 0-10000 credits
-#vacuum_cost_limit = 200		# 1-10000 credits
-
-# - Background Writer -
-
-#bgwriter_delay = 200ms			# 10-10000ms between rounds
-#bgwriter_lru_maxpages = 100		# max buffers written/round, 0 disables
-#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
-#bgwriter_flush_after = 0		# measured in pages, 0 disables
-
-# - Asynchronous Behavior -
-
-#backend_flush_after = 0		# measured in pages, 0 disables
-#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
-#maintenance_io_concurrency = 10	# 1-1000; 0 disables prefetching
-#max_worker_processes = 8		# (change requires restart)
-#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
-#max_parallel_maintenance_workers = 2	# taken from max_parallel_workers
-#max_parallel_workers = 8		# maximum number of max_worker_processes that
-					# can be used in parallel operations
-#parallel_leader_participation = on
-#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
-					# (change requires restart)
-
-
-#------------------------------------------------------------------------------
-# WRITE-AHEAD LOG
-#------------------------------------------------------------------------------
-
-# - Settings -
-
-#wal_level = replica			# minimal, replica, or logical
-					# (change requires restart)
-#fsync = on				# flush data to disk for crash safety
-					# (turning this off can cause
-					# unrecoverable data corruption)
-#synchronous_commit = on		# synchronization level;
-					# off, local, remote_write, remote_apply, or on
-#wal_sync_method = fsync		# the default is the first option
-					# supported by the operating system:
-					#   open_datasync
-					#   fdatasync (default on Linux and FreeBSD)
-					#   fsync
-					#   fsync_writethrough
-					#   open_sync
-#full_page_writes = on			# recover from partial page writes
-#wal_log_hints = off			# also do full page writes of non-critical updates
-					# (change requires restart)
-#wal_compression = off			# enables compression of full-page writes;
-					# off, pglz, lz4, zstd, or on
-#wal_init_zero = on			# zero-fill new WAL files
-#wal_recycle = on			# recycle WAL files
-#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
-					# (change requires restart)
-#wal_writer_delay = 200ms		# 1-10000 milliseconds
-#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
-#wal_skip_threshold = 2MB
-
-#commit_delay = 0			# range 0-100000, in microseconds
-#commit_siblings = 5			# range 1-1000
-
-# - Checkpoints -
-
-#checkpoint_timeout = 5min		# range 30s-1d
-#checkpoint_completion_target = 0.9	# checkpoint target duration, 0.0 - 1.0
-#checkpoint_flush_after = 0		# measured in pages, 0 disables
-#checkpoint_warning = 30s		# 0 disables
-#max_wal_size = 1GB
-#min_wal_size = 80MB
-
-# - Prefetching during recovery -
-
-#recovery_prefetch = try		# prefetch pages referenced in the WAL?
-#wal_decode_buffer_size = 512kB		# lookahead window used for prefetching
-					# (change requires restart)
-
-# - Archiving -
-
-#archive_mode = off		# enables archiving; off, on, or always
-				# (change requires restart)
-#archive_library = ''		# library to use to archive a WAL file
-				# (empty string indicates archive_command should
-				# be used)
-#archive_command = ''		# command to use to archive a WAL file
-				# placeholders: %p = path of file to archive
-				#               %f = file name only
-				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
-#archive_timeout = 0		# force a WAL file switch after this
-				# number of seconds; 0 disables
-
-# - Archive Recovery -
-
-# These are only used in recovery mode.
-
-#restore_command = ''		# command to use to restore an archived WAL file
-				# placeholders: %p = path of file to restore
-				#               %f = file name only
-				# e.g. 'cp /mnt/server/archivedir/%f %p'
-#archive_cleanup_command = ''	# command to execute at every restartpoint
-#recovery_end_command = ''	# command to execute at completion of recovery
-
-# - Recovery Target -
-
-# Set these only when performing a targeted recovery.
-
-#recovery_target = ''		# 'immediate' to end recovery as soon as a
-                                # consistent state is reached
-				# (change requires restart)
-#recovery_target_name = ''	# the named restore point to which recovery will proceed
-				# (change requires restart)
-#recovery_target_time = ''	# the time stamp up to which recovery will proceed
-				# (change requires restart)
-#recovery_target_xid = ''	# the transaction ID up to which recovery will proceed
-				# (change requires restart)
-#recovery_target_lsn = ''	# the WAL LSN up to which recovery will proceed
-				# (change requires restart)
-#recovery_target_inclusive = on # Specifies whether to stop:
-				# just after the specified recovery target (on)
-				# just before the recovery target (off)
-				# (change requires restart)
-#recovery_target_timeline = 'latest'	# 'current', 'latest', or timeline ID
-				# (change requires restart)
-#recovery_target_action = 'pause'	# 'pause', 'promote', 'shutdown'
-				# (change requires restart)
-
-
-#------------------------------------------------------------------------------
-# REPLICATION
-#------------------------------------------------------------------------------
-
-# - Sending Servers -
-
-# Set these on the primary and on any standby that will send replication data.
-
-#max_wal_senders = 10		# max number of walsender processes
-				# (change requires restart)
-#max_replication_slots = 10	# max number of replication slots
-				# (change requires restart)
-#wal_keep_size = 0		# in megabytes; 0 disables
-#max_slot_wal_keep_size = -1	# in megabytes; -1 disables
-#wal_sender_timeout = 60s	# in milliseconds; 0 disables
-#track_commit_timestamp = off	# collect timestamp of transaction commit
-				# (change requires restart)
-
-# - Primary Server -
-
-# These settings are ignored on a standby server.
-
-#synchronous_standby_names = ''	# standby servers that provide sync rep
-				# method to choose sync standbys, number of sync standbys,
-				# and comma-separated list of application_name
-				# from standby(s); '*' = all
-#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
-
-# - Standby Servers -
-
-# These settings are ignored on a primary server.
-
-#primary_conninfo = ''			# connection string to sending server
-#primary_slot_name = ''			# replication slot on sending server
-#hot_standby = on			# "off" disallows queries during recovery
-					# (change requires restart)
-#max_standby_archive_delay = 30s	# max delay before canceling queries
-					# when reading WAL from archive;
-					# -1 allows indefinite delay
-#max_standby_streaming_delay = 30s	# max delay before canceling queries
-					# when reading streaming WAL;
-					# -1 allows indefinite delay
-#wal_receiver_create_temp_slot = off	# create temp slot if primary_slot_name
-					# is not set
-#wal_receiver_status_interval = 10s	# send replies at least this often
-					# 0 disables
-#hot_standby_feedback = off		# send info from standby to prevent
-					# query conflicts
-#wal_receiver_timeout = 60s		# time that receiver waits for
-					# communication from primary
-					# in milliseconds; 0 disables
-#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
-					# retrieve WAL after a failed attempt
-#recovery_min_apply_delay = 0		# minimum delay for applying changes during recovery
-
-# - Subscribers -
-
-# These settings are ignored on a publisher.
-
-#max_logical_replication_workers = 4	# taken from max_worker_processes
-					# (change requires restart)
-#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
-#max_parallel_apply_workers_per_subscription = 2	# taken from max_logical_replication_workers
-
-
-#------------------------------------------------------------------------------
-# QUERY TUNING
-#------------------------------------------------------------------------------
-
-# - Planner Method Configuration -
-
-#enable_async_append = on
-#enable_bitmapscan = on
-#enable_gathermerge = on
-#enable_hashagg = on
-#enable_hashjoin = on
-#enable_incremental_sort = on
-#enable_indexscan = on
-#enable_indexonlyscan = on
-#enable_material = on
-#enable_memoize = on
-#enable_mergejoin = on
-#enable_nestloop = on
-#enable_parallel_append = on
-#enable_parallel_hash = on
-#enable_partition_pruning = on
-#enable_partitionwise_join = off
-#enable_partitionwise_aggregate = off
-#enable_presorted_aggregate = on
-#enable_seqscan = on
-#enable_sort = on
-#enable_tidscan = on
-
-# - Planner Cost Constants -
-
-#seq_page_cost = 1.0			# measured on an arbitrary scale
-#random_page_cost = 4.0			# same scale as above
-#cpu_tuple_cost = 0.01			# same scale as above
-#cpu_index_tuple_cost = 0.005		# same scale as above
-#cpu_operator_cost = 0.0025		# same scale as above
-#parallel_setup_cost = 1000.0	# same scale as above
-#parallel_tuple_cost = 0.1		# same scale as above
-#min_parallel_table_scan_size = 8MB
-#min_parallel_index_scan_size = 512kB
-#effective_cache_size = 4GB
-
-#jit_above_cost = 100000		# perform JIT compilation if available
-					# and query more expensive than this;
-					# -1 disables
-#jit_inline_above_cost = 500000		# inline small functions if query is
-					# more expensive than this; -1 disables
-#jit_optimize_above_cost = 500000	# use expensive JIT optimizations if
-					# query is more expensive than this;
-					# -1 disables
-
-# - Genetic Query Optimizer -
-
-#geqo = on
-#geqo_threshold = 12
-#geqo_effort = 5			# range 1-10
-#geqo_pool_size = 0			# selects default based on effort
-#geqo_generations = 0			# selects default based on effort
-#geqo_selection_bias = 2.0		# range 1.5-2.0
-#geqo_seed = 0.0			# range 0.0-1.0
-
-# - Other Planner Options -
-
-#default_statistics_target = 100	# range 1-10000
-#constraint_exclusion = partition	# on, off, or partition
-#cursor_tuple_fraction = 0.1		# range 0.0-1.0
-#from_collapse_limit = 8
-#jit = on				# allow JIT compilation
-#join_collapse_limit = 8		# 1 disables collapsing of explicit
-					# JOIN clauses
-#plan_cache_mode = auto			# auto, force_generic_plan or
-					# force_custom_plan
-#recursive_worktable_factor = 10.0	# range 0.001-1000000
-
-
-#------------------------------------------------------------------------------
-# REPORTING AND LOGGING
-#------------------------------------------------------------------------------
-
-# - Where to Log -
-
-#log_destination = 'stderr'		# Valid values are combinations of
-					# stderr, csvlog, jsonlog, syslog, and
-					# eventlog, depending on platform.
-					# csvlog and jsonlog require
-					# logging_collector to be on.
-
-# This is used when logging to stderr:
-#logging_collector = off		# Enable capturing of stderr, jsonlog,
-					# and csvlog into log files. Required
-					# to be on for csvlogs and jsonlogs.
-					# (change requires restart)
-
-# These are only used if logging_collector is on:
-#log_directory = 'log'			# directory where log files are written,
-					# can be absolute or relative to PGDATA
-#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
-					# can include strftime() escapes
-#log_file_mode = 0600			# creation mode for log files,
-					# begin with 0 to use octal notation
-#log_rotation_age = 1d			# Automatic rotation of logfiles will
-					# happen after that time.  0 disables.
-#log_rotation_size = 10MB		# Automatic rotation of logfiles will
-					# happen after that much log output.
-					# 0 disables.
-#log_truncate_on_rotation = off		# If on, an existing log file with the
-					# same name as the new log file will be
-					# truncated rather than appended to.
-					# But such truncation only occurs on
-					# time-driven rotation, not on restarts
-					# or size-driven rotation.  Default is
-					# off, meaning append to existing files
-					# in all cases.
-
-# These are relevant when logging to syslog:
-#syslog_facility = 'LOCAL0'
-#syslog_ident = 'postgres'
-#syslog_sequence_numbers = on
-#syslog_split_messages = on
-
-# This is only relevant when logging to eventlog (Windows):
-# (change requires restart)
-#event_source = 'PostgreSQL'
-
-# - When to Log -
-
-#log_min_messages = warning		# values in order of decreasing detail:
-					#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-					#   info
-					#   notice
-					#   warning
-					#   error
-					#   log
-					#   fatal
-					#   panic
-
-#log_min_error_statement = error	# values in order of decreasing detail:
-					#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-					#   info
-					#   notice
-					#   warning
-					#   error
-					#   log
-					#   fatal
-					#   panic (effectively off)
-
-#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
-					# and their durations, > 0 logs only
-					# statements running at least this number
-					# of milliseconds
-
-#log_min_duration_sample = -1		# -1 is disabled, 0 logs a sample of statements
-					# and their durations, > 0 logs only a sample of
-					# statements running at least this number
-					# of milliseconds;
-					# sample fraction is determined by log_statement_sample_rate
-
-#log_statement_sample_rate = 1.0	# fraction of logged statements exceeding
-					# log_min_duration_sample to be logged;
-					# 1.0 logs all such statements, 0.0 never logs
-
-
-#log_transaction_sample_rate = 0.0	# fraction of transactions whose statements
-					# are logged regardless of their duration; 1.0 logs all
-					# statements from all transactions, 0.0 never logs
-
-#log_startup_progress_interval = 10s	# Time between progress updates for
-					# long-running startup operations.
-					# 0 disables the feature, > 0 indicates
-					# the interval in milliseconds.
-
-# - What to Log -
-
-#debug_print_parse = off
-#debug_print_rewritten = off
-#debug_print_plan = off
-#debug_pretty_print = on
-#log_autovacuum_min_duration = 10min	# log autovacuum activity;
-					# -1 disables, 0 logs all actions and
-					# their durations, > 0 logs only
-					# actions running at least this number
-					# of milliseconds.
-#log_checkpoints = on
-#log_connections = off
-#log_disconnections = off
-#log_duration = off
-#log_error_verbosity = default		# terse, default, or verbose messages
-#log_hostname = off
-#log_line_prefix = '%m [%p] '		# special values:
-					#   %a = application name
-					#   %u = user name
-					#   %d = database name
-					#   %r = remote host and port
-					#   %h = remote host
-					#   %b = backend type
-					#   %p = process ID
-					#   %P = process ID of parallel group leader
-					#   %t = timestamp without milliseconds
-					#   %m = timestamp with milliseconds
-					#   %n = timestamp with milliseconds (as a Unix epoch)
-					#   %Q = query ID (0 if none or not computed)
-					#   %i = command tag
-					#   %e = SQL state
-					#   %c = session ID
-					#   %l = session line number
-					#   %s = session start timestamp
-					#   %v = virtual transaction ID
-					#   %x = transaction ID (0 if none)
-					#   %q = stop here in non-session
-					#        processes
-					#   %% = '%'
-					# e.g. '<%u%%%d> '
-#log_lock_waits = off			# log lock waits >= deadlock_timeout
-#log_recovery_conflict_waits = off	# log standby recovery conflict waits
-					# >= deadlock_timeout
-#log_parameter_max_length = -1		# when logging statements, limit logged
-					# bind-parameter values to N bytes;
-					# -1 means print in full, 0 disables
-#log_parameter_max_length_on_error = 0	# when logging an error, limit logged
-					# bind-parameter values to N bytes;
-					# -1 means print in full, 0 disables
-#log_statement = 'none'			# none, ddl, mod, all
-#log_replication_commands = off
-#log_temp_files = -1			# log temporary files equal or larger
-					# than the specified size in kilobytes;
-					# -1 disables, 0 logs all temp files
-#log_timezone = 'GMT'
-
-# - Process Title -
-
-#cluster_name = ''			# added to process titles if nonempty
-					# (change requires restart)
-#update_process_title = on
-
-
-#------------------------------------------------------------------------------
-# STATISTICS
-#------------------------------------------------------------------------------
-
-# - Cumulative Query and Index Statistics -
-
-#track_activities = on
-#track_activity_query_size = 1024	# (change requires restart)
-#track_counts = on
-#track_io_timing = off
-#track_wal_io_timing = off
-#track_functions = none			# none, pl, all
-#stats_fetch_consistency = cache
-
-
-# - Monitoring -
-
-#compute_query_id = auto
-#log_statement_stats = off
-#log_parser_stats = off
-#log_planner_stats = off
-#log_executor_stats = off
-
-
-#------------------------------------------------------------------------------
-# AUTOVACUUM
-#------------------------------------------------------------------------------
-
-#autovacuum = on			# Enable autovacuum subprocess?  'on'
-					# requires track_counts to also be on.
-#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
-					# (change requires restart)
-#autovacuum_naptime = 1min		# time between autovacuum runs
-#autovacuum_vacuum_threshold = 50	# min number of row updates before
-					# vacuum
-#autovacuum_vacuum_insert_threshold = 1000	# min number of row inserts
-					# before vacuum; -1 disables insert
-					# vacuums
-#autovacuum_analyze_threshold = 50	# min number of row updates before
-					# analyze
-#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
-#autovacuum_vacuum_insert_scale_factor = 0.2	# fraction of inserts over table
-					# size before insert vacuum
-#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
-#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
-					# (change requires restart)
-#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
-					# before forced vacuum
-					# (change requires restart)
-#autovacuum_vacuum_cost_delay = 2ms	# default vacuum cost delay for
-					# autovacuum, in milliseconds;
-					# -1 means use vacuum_cost_delay
-#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
-					# autovacuum, -1 means use
-					# vacuum_cost_limit
-
-
-#------------------------------------------------------------------------------
-# CLIENT CONNECTION DEFAULTS
-#------------------------------------------------------------------------------
-
-# - Statement Behavior -
-
-#client_min_messages = notice		# values in order of decreasing detail:
-					#   debug5
-					#   debug4
-					#   debug3
-					#   debug2
-					#   debug1
-					#   log
-					#   notice
-					#   warning
-					#   error
-#search_path = '"$user", public'	# schema names
-#row_security = on
-#default_table_access_method = 'heap'
-#default_tablespace = ''		# a tablespace name, '' uses the default
-#default_toast_compression = 'pglz'	# 'pglz' or 'lz4'
-#temp_tablespaces = ''			# a list of tablespace names, '' uses
-					# only default tablespace
-#check_function_bodies = on
-#default_transaction_isolation = 'read committed'
-#default_transaction_read_only = off
-#default_transaction_deferrable = off
-#session_replication_role = 'origin'
-#statement_timeout = 0			# in milliseconds, 0 is disabled
-#lock_timeout = 0			# in milliseconds, 0 is disabled
-#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
-#idle_session_timeout = 0		# in milliseconds, 0 is disabled
-#vacuum_freeze_table_age = 150000000
-#vacuum_freeze_min_age = 50000000
-#vacuum_failsafe_age = 1600000000
-#vacuum_multixact_freeze_table_age = 150000000
-#vacuum_multixact_freeze_min_age = 5000000
-#vacuum_multixact_failsafe_age = 1600000000
-#bytea_output = 'hex'			# hex, escape
-#xmlbinary = 'base64'
-#xmloption = 'content'
-#gin_pending_list_limit = 4MB
-#createrole_self_grant = ''		# set and/or inherit
-
-# - Locale and Formatting -
-
-#datestyle = 'iso, mdy'
-#intervalstyle = 'postgres'
-#timezone = 'GMT'
-#timezone_abbreviations = 'Default'     # Select the set of available time zone
-					# abbreviations.  Currently, there are
-					#   Default
-					#   Australia (historical usage)
-					#   India
-					# You can create your own file in
-					# share/timezonesets/.
-#extra_float_digits = 1			# min -15, max 3; any value >0 actually
-					# selects precise output mode
-#client_encoding = sql_ascii		# actually, defaults to database
-					# encoding
-
-# These settings are initialized by initdb, but they can be changed.
-#lc_messages = 'C'			# locale for system error message
-					# strings
-#lc_monetary = 'C'			# locale for monetary formatting
-#lc_numeric = 'C'			# locale for number formatting
-#lc_time = 'C'				# locale for time formatting
-
-# default configuration for text search
-#default_text_search_config = 'pg_catalog.simple'
-
-# - Shared Library Preloading -
-
-#local_preload_libraries = ''
-#session_preload_libraries = ''
-#shared_preload_libraries = ''	# (change requires restart)
-#jit_provider = 'llvmjit'		# JIT library to use
-
-# - Other Defaults -
-
-#dynamic_library_path = '$libdir'
-#gin_fuzzy_search_limit = 0
-
-
-#------------------------------------------------------------------------------
-# LOCK MANAGEMENT
-#------------------------------------------------------------------------------
-
-#deadlock_timeout = 1s
-#max_locks_per_transaction = 64		# min 10
-					# (change requires restart)
-#max_pred_locks_per_transaction = 64	# min 10
-					# (change requires restart)
-#max_pred_locks_per_relation = -2	# negative values mean
-					# (max_pred_locks_per_transaction
-					#  / -max_pred_locks_per_relation) - 1
-#max_pred_locks_per_page = 2            # min 0
-
-
-#------------------------------------------------------------------------------
-# VERSION AND PLATFORM COMPATIBILITY
-#------------------------------------------------------------------------------
-
-# - Previous PostgreSQL Versions -
-
-#array_nulls = on
-#backslash_quote = safe_encoding	# on, off, or safe_encoding
-#escape_string_warning = on
-#lo_compat_privileges = off
-#quote_all_identifiers = off
-#standard_conforming_strings = on
-#synchronize_seqscans = on
-
-# - Other Platforms and Clients -
-
-#transform_null_equals = off
-
-
-#------------------------------------------------------------------------------
-# ERROR HANDLING
-#------------------------------------------------------------------------------
-
-#exit_on_error = off			# terminate session on any error?
-#restart_after_crash = on		# reinitialize after backend crash?
-#data_sync_retry = off			# retry or panic on failure to fsync
-					# data?
-					# (change requires restart)
-#recovery_init_sync_method = fsync	# fsync, syncfs (Linux 5.8+)
-
-
-#------------------------------------------------------------------------------
-# CONFIG FILE INCLUDES
-#------------------------------------------------------------------------------
-
-# These options allow settings to be loaded from files other than the
-# default postgresql.conf.  Note that these are directives, not variable
-# assignments, so they can usefully be given more than once.
-
-#include_dir = '...'			# include files ending in '.conf' from
-					# a directory, e.g., 'conf.d'
-#include_if_exists = '...'		# include file only if it exists
-#include = '...'			# include file
-
-
-#------------------------------------------------------------------------------
-# CUSTOMIZED OPTIONS
-#------------------------------------------------------------------------------
-
-# Add settings for extensions here
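With postgresql.conf.sample removed, the permalink added to postgresql.conf above is the remaining offline reference; the same defaults and one-line descriptions can also be pulled from a live server through the pg_settings view. A sketch under the same connection assumptions as the earlier examples:

    # Sketch only: recover documented defaults (boot_val) and descriptions
    # for a few of the parameters this benchmark config overrides.
    import psycopg2

    conn = psycopg2.connect(host="localhost", dbname="hello_world",
                            user="benchmarkdbuser", password="benchmarkdbpass")
    with conn.cursor() as cur:
        cur.execute("""
            SELECT name, boot_val, short_desc
            FROM pg_settings
            WHERE name IN ('max_connections', 'wal_level', 'random_page_cost')
            ORDER BY name
        """)
        for name, boot_val, desc in cur.fetchall():
            print(f"{name} (default {boot_val}): {desc}")
    conn.close()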

Some files were not shown because too many files changed in this diff