
Merge pull request #25 from TechEmpower/master

aa
三刀 2 years ago
Parent
Current commit
66beba8672
100 changed files with 3,158 additions and 839 deletions
  1. frameworks/C++/cutelyst/benchmark_config.json (+27 -25)
  2. frameworks/C++/cutelyst/build.sh (+13 -13)
  3. frameworks/C++/cutelyst/cutelyst-nginx-my.dockerfile (+1 -1)
  4. frameworks/C++/cutelyst/cutelyst-nginx-pg.dockerfile (+1 -1)
  5. frameworks/C++/cutelyst/cutelyst-nginx.dockerfile (+1 -1)
  6. frameworks/C++/cutelyst/cutelyst-pf-apg-batch.dockerfile (+1 -1)
  7. frameworks/C++/cutelyst/cutelyst-pf-apg.dockerfile (+1 -1)
  8. frameworks/C++/cutelyst/cutelyst-pf-my.dockerfile (+1 -1)
  9. frameworks/C++/cutelyst/cutelyst-pf-pg.dockerfile (+1 -1)
  10. frameworks/C++/cutelyst/cutelyst-t-apg-cutelee.dockerfile (+1 -1)
  11. frameworks/C++/cutelyst/cutelyst-thread-apg-batch.dockerfile (+1 -1)
  12. frameworks/C++/cutelyst/cutelyst-thread-apg.dockerfile (+1 -1)
  13. frameworks/C++/cutelyst/cutelyst-thread-my-cutelee.dockerfile (+1 -1)
  14. frameworks/C++/cutelyst/cutelyst-thread-my.dockerfile (+1 -1)
  15. frameworks/C++/cutelyst/cutelyst-thread-pg-cutelee.dockerfile (+1 -1)
  16. frameworks/C++/cutelyst/cutelyst-thread-pg.dockerfile (+1 -1)
  17. frameworks/C++/cutelyst/cutelyst-thread-pico.dockerfile (+1 -1)
  18. frameworks/C++/cutelyst/cutelyst-thread.dockerfile (+1 -1)
  19. frameworks/C++/cutelyst/cutelyst.dockerfile (+1 -1)
  20. frameworks/C++/cutelyst/src/CMakeLists.txt (+16 -14)
  21. frameworks/C++/cutelyst/src/cachedqueries.cpp (+70 -0)
  22. frameworks/C++/cutelyst/src/cachedqueries.h (+19 -0)
  23. frameworks/C++/cutelyst/src/cutelyst-benchmarks.cpp (+16 -7)
  24. frameworks/C++/cutelyst/src/databaseupdatestest.cpp (+39 -38)
  25. frameworks/C++/cutelyst/src/databaseupdatestest.h (+4 -4)
  26. frameworks/C++/cutelyst/src/fortunetest.cpp (+6 -4)
  27. frameworks/C++/cutelyst/src/fortunetest.h (+6 -6)
  28. frameworks/C++/cutelyst/src/multipledatabasequeriestest.cpp (+7 -5)
  29. frameworks/C++/cutelyst/src/multipledatabasequeriestest.h (+3 -3)
  30. frameworks/C++/cutelyst/src/root.cpp (+0 -37)
  31. frameworks/C++/cutelyst/src/root.h (+0 -22)
  32. frameworks/C++/cutelyst/src/singledatabasequerytest.cpp (+39 -6)
  33. frameworks/C++/cutelyst/src/singledatabasequerytest.h (+7 -4)
  34. frameworks/C++/suil/benchmark/src/main.cpp (+6 -6)
  35. frameworks/C++/suil/suil.dockerfile (+1 -1)
  36. frameworks/C++/treefrog/config/application.ini (+6 -6)
  37. frameworks/C++/treefrog/treefrog-epoll.dockerfile (+10 -10)
  38. frameworks/C++/treefrog/treefrog-mongodb.dockerfile (+9 -9)
  39. frameworks/C++/treefrog/treefrog-mysql.dockerfile (+9 -9)
  40. frameworks/C++/treefrog/treefrog.dockerfile (+9 -9)
  41. frameworks/C/h2o/CMakeLists.txt (+23 -11)
  42. frameworks/C/h2o/README.md (+11 -12)
  43. frameworks/C/h2o/h2o.dockerfile (+2 -2)
  44. frameworks/C/h2o/h2o.sh (+4 -3)
  45. frameworks/C/h2o/src/database.c (+495 -300)
  46. frameworks/C/h2o/src/database.h (+23 -20)
  47. frameworks/C/h2o/src/event_loop.c (+5 -4)
  48. frameworks/C/h2o/src/event_loop.h (+2 -2)
  49. frameworks/C/h2o/src/global_data.h (+3 -2)
  50. frameworks/C/h2o/src/handlers/fortune.c (+18 -13)
  51. frameworks/C/h2o/src/handlers/fortune.h (+4 -3)
  52. frameworks/C/h2o/src/handlers/request_handler_data.h (+7 -4)
  53. frameworks/C/h2o/src/handlers/world.c (+110 -89)
  54. frameworks/C/h2o/src/handlers/world.h (+12 -6)
  55. frameworks/C/h2o/src/main.c (+39 -13)
  56. frameworks/C/h2o/src/request_handler.c (+18 -14)
  57. frameworks/C/h2o/src/request_handler.h (+9 -6)
  58. frameworks/C/h2o/src/thread.c (+3 -8)
  59. frameworks/C/h2o/src/thread.h (+4 -10)
  60. frameworks/C/h2o/src/utility.c (+1 -1)
  61. frameworks/CSharp/fastendpoints/Benchmarks/Benchmarks.csproj (+10 -10)
  62. frameworks/CSharp/fastendpoints/Benchmarks/Endpoints/JsonEndpoint.cs (+1 -1)
  63. frameworks/CSharp/fastendpoints/Benchmarks/Endpoints/PlainTextEndpoint.cs (+1 -1)
  64. frameworks/CSharp/fastendpoints/fastendpoints.dockerfile (+2 -2)
  65. frameworks/Crystal/grip/grip.cr (+3 -1)
  66. frameworks/Crystal/grip/shard.yml (+2 -1)
  67. frameworks/D/archttp/README.md (+18 -0)
  68. frameworks/D/archttp/archttp.dockerfile (+12 -0)
  69. frameworks/D/archttp/benchmark_config.json (+24 -0)
  70. frameworks/D/archttp/config.toml (+5 -5)
  71. frameworks/D/archttp/dub.sdl (+5 -0)
  72. frameworks/D/archttp/source/main.d (+19 -0)
  73. frameworks/D/vibed/dub.json (+2 -2)
  74. frameworks/D/vibed/dub.selections.json (+9 -8)
  75. frameworks/D/vibed/source/mongodb.d (+10 -3)
  76. frameworks/Dart/angel3/angel3-mysql.dockerfile (+22 -0)
  77. frameworks/Dart/angel3/angel3.dockerfile (+2 -2)
  78. frameworks/Dart/angel3/benchmark_config.json (+49 -25)
  79. frameworks/Dart/angel3/orm-mysql/analysis_options.yaml (+1 -0)
  80. frameworks/Dart/angel3/orm-mysql/config/default.yaml (+12 -0)
  81. frameworks/Dart/angel3/orm-mysql/config/development.yaml (+2 -0)
  82. frameworks/Dart/angel3/orm-mysql/config/production.yaml (+3 -0)
  83. frameworks/Dart/angel3/orm-mysql/lib/models.dart (+2 -0)
  84. frameworks/Dart/angel3/orm-mysql/lib/orm_mysql_app.dart (+19 -0)
  85. frameworks/Dart/angel3/orm-mysql/lib/src/config/config.dart (+35 -0)
  86. frameworks/Dart/angel3/orm-mysql/lib/src/config/plugins/orm.dart (+75 -0)
  87. frameworks/Dart/angel3/orm-mysql/lib/src/config/plugins/plugins.dart (+10 -0)
  88. frameworks/Dart/angel3/orm-mysql/lib/src/models/fortune.dart (+16 -0)
  89. frameworks/Dart/angel3/orm-mysql/lib/src/models/fortune.g.dart (+212 -0)
  90. frameworks/Dart/angel3/orm-mysql/lib/src/models/world.dart (+16 -0)
  91. frameworks/Dart/angel3/orm-mysql/lib/src/models/world.g.dart (+216 -0)
  92. frameworks/Dart/angel3/orm-mysql/lib/src/routes/controllers/controllers.dart (+131 -0)
  93. frameworks/Dart/angel3/orm-mysql/lib/src/routes/routes.dart (+62 -0)
  94. frameworks/Dart/angel3/orm-mysql/lib/src/services/services.dart (+13 -0)
  95. frameworks/Dart/angel3/orm-mysql/pubspec.lock (+845 -0)
  96. frameworks/Dart/angel3/orm-mysql/pubspec.yaml (+40 -0)
  97. frameworks/Dart/angel3/orm-mysql/run/dev.dart (+28 -0)
  98. frameworks/Dart/angel3/orm-mysql/run/prod.dart (+29 -0)
  99. frameworks/Dart/angel3/orm-mysql/templates/fortunes.mustache (+20 -0)
  100. frameworks/Dart/angel3/orm-mysql/test/all_test.dart (+43 -0)

+ 27 - 25
frameworks/C++/cutelyst/benchmark_config.json

@@ -20,10 +20,11 @@
                 "versus": ""
             },
             "pf-apg": {
-                "db_url": "/dbp",
-                "query_url": "/queriesp?queries=",
+                "db_url": "/pg",
+                "query_url": "/querAPG?queries=",
                 "update_url": "/updatep?queries=",
-                "fortune_url": "/fortunes_raw_p",
+                "fortune_url": "/f_RW_APG",
+                "cached_query_url": "/cached_queries?count=",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -57,10 +58,10 @@
                 "versus": ""
             },
             "pf-pg": {
-                "db_url": "/db_postgres",
-                "query_url": "/query_postgres?queries=",
-                "update_url": "/updates_postgres?queries=",
-                "fortune_url": "/fortunes_raw_postgres",
+                "db_url": "/PG",
+                "query_url": "/queryPG?queries=",
+                "update_url": "/ups_QPG?queries=",
+                "fortune_url": "/f_RW_QPG",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -77,10 +78,10 @@
                 "versus": ""
             },
             "pf-my": {
-                "db_url": "/db_mysql",
-                "query_url": "/query_mysql?queries=",
-                "update_url": "/updates_mysql?queries=",
-                "fortune_url": "/fortunes_raw_mysql",
+                "db_url": "/MY",
+                "query_url": "/queryMY?queries=",
+                "update_url": "/ups_QMY?queries=",
+                "fortune_url": "/f_RW_QMY",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -132,10 +133,11 @@
                 "versus": ""
             },
             "thread-apg": {
-                "db_url": "/dbp",
-                "query_url": "/queriesp?queries=",
+                "db_url": "/pg",
+                "query_url": "/querAPG?queries=",
                 "update_url": "/updatep?queries=",
-                "fortune_url": "/fortunes_raw_p",
+                "fortune_url": "/f_RW_APG",
+                "cached_query_url": "/cached_queries?count=",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -169,10 +171,10 @@
                 "versus": ""
             },
             "thread-pg": {
-                "db_url": "/db_postgres",
-                "query_url": "/query_postgres?queries=",
-                "update_url": "/updates_postgres?queries=",
-                "fortune_url": "/fortunes_raw_postgres",
+                "db_url": "/PG",
+                "query_url": "/queryPG?queries=",
+                "update_url": "/ups_QPG?queries=",
+                "fortune_url": "/f_RW_QPG",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -189,7 +191,7 @@
                 "versus": ""
             },
             "thread-pg-cutelee": {
-                "fortune_url": "/fortunes_cutelee_postgres",
+                "fortune_url": "/f_CL_QPG",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -206,7 +208,7 @@
                 "versus": ""
             },
             "t-apg-cutelee": {
-                "fortune_url": "/fortunes_c_p",
+                "fortune_url": "/f_CL_APG",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -223,10 +225,10 @@
                 "versus": ""
             },
             "thread-my": {
-                "db_url": "/db_mysql",
-                "query_url": "/query_mysql?queries=",
-                "update_url": "/updates_mysql?queries=",
-                "fortune_url": "/fortunes_raw_mysql",
+                "db_url": "/MY",
+                "query_url": "/queryMY?queries=",
+                "update_url": "/ups_QMY?queries=",
+                "fortune_url": "/f_RW_QMY",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",
@@ -243,7 +245,7 @@
                 "versus": ""
             },
             "thread-my-cutelee": {
-                "fortune_url": "/fortunes_cutelee_mysql",
+                "fortune_url": "/f_CL_QMY",
                 "port": 8080,
                 "approach": "Realistic",
                 "classification": "Fullstack",

+ 13 - 13
frameworks/C++/cutelyst/build.sh

@@ -1,28 +1,28 @@
 #!/bin/bash
 
-export ASQL_VER=0.46.0
-export CUTELEE_VER=6.0.0
-export CUTELYST_VER=3.1.0
+export ASQL_VER=0.74.0
+export CUTELEE_VER=6.1.0
+export CUTELYST_VER=3.7.0
 
 apt update -qq && \
     apt install -yqq --no-install-recommends \
     cmake \
     git \
     pkg-config \
-    qtbase5-dev \
-    libqt5sql5-mysql \
-    libqt5sql5-psql \
-    qtdeclarative5-dev \
+    qt6-base-dev \
+    libqt6sql6-mysql \
+    libqt6sql6-psql \
+    libegl1-mesa-dev \
     postgresql-server-dev-all
 
-wget -q https://github.com/cutelyst/cutelee/releases/download/v${CUTELEE_VER}/cutelee_${CUTELEE_VER}_amd64.deb && \
-    apt install -yqq ./cutelee_${CUTELEE_VER}_amd64.deb
+wget -q https://github.com/cutelyst/cutelee/releases/download/v${CUTELEE_VER}/cutelee6-qt6_${CUTELEE_VER}_amd64.deb && \
+    apt install -yqq ./cutelee6-qt6_${CUTELEE_VER}_amd64.deb
 
-wget -q https://github.com/cutelyst/asql/releases/download/v${ASQL_VER}/libasql_${ASQL_VER}_amd64.deb && \
-    apt install -yqq ./libasql_${ASQL_VER}_amd64.deb
+wget -q https://github.com/cutelyst/asql/releases/download/v${ASQL_VER}/libasql0-qt6_${ASQL_VER}_amd64.deb && \
+    apt install -yqq ./libasql0-qt6_${ASQL_VER}_amd64.deb
 
-wget -q https://github.com/cutelyst/cutelyst/releases/download/v${CUTELYST_VER}/cutelyst_${CUTELYST_VER}_amd64.deb && \
-    apt install -yqq ./cutelyst_${CUTELYST_VER}_amd64.deb
+wget -q https://github.com/cutelyst/cutelyst/releases/download/v${CUTELYST_VER}/cutelyst3-qt6_${CUTELYST_VER}_amd64.deb && \
+    apt install -yqq ./cutelyst3-qt6_${CUTELYST_VER}_amd64.deb
 
 cd ${TROOT} && \
     mkdir -p build && \

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-nginx-my.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-nginx-pg.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-nginx.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-pf-apg-batch.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-pf-apg.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-pf-my.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-pf-pg.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-t-apg-cutelee.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-apg-batch.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-apg.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-my-cutelee.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-my.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-pg-cutelee.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-pg.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread-pico.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst-thread.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 1 - 1
frameworks/C++/cutelyst/cutelyst.dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:20.04
+FROM ubuntu:22.04
 
 RUN apt-get update -qq && \
     apt-get install -yqq locales wget build-essential

+ 16 - 14
frameworks/C++/cutelyst/src/CMakeLists.txt

@@ -13,10 +13,10 @@ FetchContent_Declare(
 )
 FetchContent_MakeAvailable(mimalloc)
 
-find_package(Qt5 5.6.0 REQUIRED COMPONENTS Core Network Sql)
-find_package(ASqlQt5 0.43.0 REQUIRED)
-find_package(Cutelyst3Qt5 3.1 REQUIRED)
-find_package(Cutelee6Qt5 6.0.0 REQUIRED)
+find_package(Qt6 6.2.0 REQUIRED COMPONENTS Core Network Sql)
+find_package(ASqlQt6 0.74 REQUIRED)
+find_package(Cutelyst3Qt6 3.6 REQUIRED)
+find_package(Cutelee6Qt6 6.1.0 REQUIRED)
 find_package(PostgreSQL REQUIRED)
 
 # Auto generate moc files
@@ -44,10 +44,10 @@ set(cutelyst_benchmarks_SRCS
     multipledatabasequeriestest.h
     plaintexttest.cpp
     plaintexttest.h
-    root.cpp
-    root.h
     singledatabasequerytest.cpp
     singledatabasequerytest.h
+    cachedqueries.h
+    cachedqueries.cpp
     ${TEMPLATES_SRC}
 )
 
@@ -60,10 +60,11 @@ target_link_libraries(cutelyst_benchmarks
     Cutelyst::Core
     Cutelyst::Utils::Sql
     Cutelyst::View::Cutelee
-    Qt5::Core
-    Qt5::Network
-    Qt5::Sql
-    ASqlQt5::Core
+    Qt::Core
+    Qt::Network
+    Qt::Sql
+    ASql::Core
+    ASql::Pg
 )
 
 add_executable(cutelyst-benchmarks ${cutelyst_benchmarks_SRCS} main.cpp)
@@ -73,10 +74,11 @@ target_link_libraries(cutelyst-benchmarks
     Cutelyst::Server
     Cutelyst::Utils::Sql
     Cutelyst::View::Cutelee
-    Qt5::Core
-    Qt5::Network
-    Qt5::Sql
-    ASqlQt5::Core
+    Qt::Core
+    Qt::Network
+    Qt::Sql
+    ASql::Core
+    ASql::Pg
     mimalloc
 )
 if (mimalloc_FOUND)

+ 70 - 0
frameworks/C++/cutelyst/src/cachedqueries.cpp

@@ -0,0 +1,70 @@
+#include "cachedqueries.h"
+
+#include <apool.h>
+#include <aresult.h>
+#include <apreparedquery.h>
+
+#include <QJsonDocument>
+#include <QJsonObject>
+#include <QJsonArray>
+
+#include <QCache>
+
+using namespace ASql;
+
+CachedQueries::CachedQueries(QObject *parent)
+    : Controller{parent}
+{
+
+}
+
+void CachedQueries::cached_queries(Context *c)
+{
+    int queries = c->request()->queryParam(QStringLiteral("count")).toInt();
+    if (queries < 1) {
+        queries = 1;
+    } else if (queries > 500) {
+        queries = 500;
+    }
+
+    static thread_local QCache<int, QJsonObject> cache(1024);
+
+    auto array = std::shared_ptr<QJsonArray>(new QJsonArray);
+
+    ASync async(c);
+    static thread_local auto db = APool::database();
+    for (int i = 0; i < queries; ++i) {
+        const int id = (rand() % 10000) + 1;
+
+        QJsonObject *obj = cache[id];
+        if (obj) {
+            array->append(*obj);
+            continue;
+        }
+
+        db.exec(APreparedQueryLiteral(u8"SELECT id, randomNumber FROM world WHERE id=$1"),
+                               {id}, c, [c, async, i, queries, array] (AResult &result) {
+            if (Q_LIKELY(!result.error() && result.size())) {
+                auto it = result.begin();
+                int id = it[0].toInt();
+                auto obj = new QJsonObject({
+                                               {QStringLiteral("id"), id},
+                                               {QStringLiteral("randomNumber"), it[1].toInt()}
+                                           });
+                array->append(*obj);
+                cache.insert(id, obj, 1);
+
+                if (array->size() == queries) {
+                    c->response()->setJsonArrayBody(*array);
+                }
+                return;
+            }
+
+            c->res()->setStatus(Response::InternalServerError);
+        });
+    }
+
+    if (array->size() == queries) {
+        c->response()->setJsonArrayBody(*array);
+    }
+}
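
Two details in the new handler above are easy to miss: the `QCache` is `thread_local`, so each worker thread warms its own cache, and `QCache::insert()` takes ownership of the heap-allocated `QJsonObject`, so the caller must not delete it. A reduced, synchronous sketch of that lookup-or-fetch pattern; `loadFromDatabase()` is a hypothetical stand-in for the asynchronous ASql query used in the real code:

    #include <QCache>
    #include <QJsonObject>

    QJsonObject loadFromDatabase(int id);  // hypothetical helper replacing the ASql round trip

    QJsonObject cachedWorld(int id)
    {
        // One cache per worker thread; capacity 1024 entries, as in cached_queries() above.
        static thread_local QCache<int, QJsonObject> cache(1024);

        if (const QJsonObject *hit = cache.object(id))
            return *hit;  // cache hit: no database query needed

        QJsonObject row = loadFromDatabase(id);
        cache.insert(id, new QJsonObject(row), 1);  // the cache now owns the heap copy
        return row;
    }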

+ 19 - 0
frameworks/C++/cutelyst/src/cachedqueries.h

@@ -0,0 +1,19 @@
+#ifndef CACHEDQUERIES_H
+#define CACHEDQUERIES_H
+
+#include <Cutelyst/Controller>
+
+using namespace Cutelyst;
+
+class CachedQueries : public Controller
+{
+    Q_OBJECT
+    C_NAMESPACE("")
+public:
+    explicit CachedQueries(QObject *parent = nullptr);
+
+    C_ATTR(cached_queries, :Local :AutoArgs)
+    void cached_queries(Context *c);
+};
+
+#endif // CACHEDQUERIES_H

+ 16 - 7
frameworks/C++/cutelyst/src/cutelyst-benchmarks.cpp

@@ -13,6 +13,7 @@
 #include <QDir>
 
 #include <apool.h>
+#include <apg.h>
 
 #include "jsontest.h"
 #include "singledatabasequerytest.h"
@@ -20,14 +21,17 @@
 #include "databaseupdatestest.h"
 #include "fortunetest.h"
 #include "plaintexttest.h"
+#include "cachedqueries.h"
 
 using namespace Cutelyst;
+using namespace ASql;
 
 static QMutex mutex;
 
 cutelyst_benchmarks::cutelyst_benchmarks(QObject *parent) : Application(parent)
 {
-    qsrand(QDateTime::currentMSecsSinceEpoch());
+    static std::once_flag once;
+    std::call_once(once, []() { srand(time(NULL)); });
 }
 
 cutelyst_benchmarks::~cutelyst_benchmarks()
@@ -56,6 +60,7 @@ bool cutelyst_benchmarks::init()
     new DatabaseUpdatesTest(this);
     new FortuneTest(this);
     new PlaintextTest(this);
+    new CachedQueries(this);
 
     if (defaultHeaders().server().isEmpty()) {
         defaultHeaders().setServer(QStringLiteral("Cutelyst"));
@@ -71,7 +76,7 @@ bool cutelyst_benchmarks::postFork()
 
     QSqlDatabase db;
     const auto driver = config(QStringLiteral("Driver")).toString();
-    if (driver == QLatin1String("QPSQL")) {
+    if (driver == u"QPSQL") {
         db = QSqlDatabase::addDatabase(driver, Sql::databaseNameThread(QStringLiteral("postgres")));
         db.setDatabaseName(QStringLiteral("hello_world"));
         db.setUserName(QStringLiteral("benchmarkdbuser"));
@@ -81,7 +86,7 @@ bool cutelyst_benchmarks::postFork()
             qDebug() << "Error opening PostgreSQL db:" << db << db.connectionName() << db.lastError().databaseText();
             return false;
         }
-    } else if (driver == QLatin1String("QMYSQL")) {
+    } else if (driver == u"QMYSQL") {
         db = QSqlDatabase::addDatabase(driver, Sql::databaseNameThread(QStringLiteral("mysql")));
         db.setDatabaseName(QStringLiteral("hello_world"));
         db.setUserName(QStringLiteral("benchmarkdbuser"));
@@ -91,13 +96,17 @@ bool cutelyst_benchmarks::postFork()
             qDebug() << "Error opening MySQL db:" << db << db.connectionName() << db.lastError().databaseText();
             return false;
         }
-    } else if (driver == QLatin1String("postgres")) {
+    } else if (driver == u"postgres") {
         QUrl uri(QStringLiteral("postgresql://benchmarkdbuser:benchmarkdbpass@server/hello_world"));
         uri.setHost(config(QStringLiteral("DatabaseHostName")).toString());
         qDebug() << "ASql URI:" << uri.toString();
 
-        APool::addDatabase(uri.toString());
-        APool::setDatabaseMaxIdleConnections(128);
+        APool::create(ASql::APg::factory(uri.toString()));
+        APool::setMaxIdleConnections(128);
+        APool::setSetupCallback([](ADatabase &db) {
+            // Enable Pipeline mode
+            db.enterPipelineMode(500);
+        });
     }
 
     qDebug() << "Connections" << QCoreApplication::applicationPid() << QThread::currentThread() << QSqlDatabase::connectionNames();
@@ -110,4 +119,4 @@ bool cutelyst_benchmarks::postFork()
     return true;
 }
 
-#include "moc_cutelyst-benchmarks.cpp"
+//#include "moc_cutelyst-benchmarks.cpp"
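
The `postFork()` hunk above is where the ASql API changes land: the pool is now created from a driver factory instead of `APool::addDatabase()`, the idle-connection setter was renamed, and a per-connection setup callback switches every new connection into pipeline mode. A condensed sketch of just that setup, using the calls as they appear in the diff (the URI string is a placeholder; the method names belong to the ASql version pinned in build.sh):

    #include <apool.h>
    #include <apg.h>

    using namespace ASql;

    void setupAsqlPool(const QString &uri)
    {
        // Build the default pool from the PostgreSQL driver factory.
        APool::create(APg::factory(uri));

        // Keep up to 128 idle connections alive instead of tearing them down.
        APool::setMaxIdleConnections(128);

        // Runs for every freshly opened connection: enable pipeline mode,
        // mirroring the enterPipelineMode(500) call in the diff above.
        APool::setSetupCallback([](ADatabase &db) {
            db.enterPipelineMode(500);
        });
    }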

+ 39 - 38
frameworks/C++/cutelyst/src/databaseupdatestest.cpp

@@ -16,6 +16,8 @@
 
 #include "picojson.h"
 
+using namespace ASql;
+
 DatabaseUpdatesTest::DatabaseUpdatesTest(QObject *parent) : Controller(parent)
 {
 
@@ -34,29 +36,29 @@ void DatabaseUpdatesTest::updatep(Context *c)
     ASync async(c);
     static thread_local auto db = APool::database();
     for (int i = 0; i < queries; ++i) {
-        int id = (qrand() % 10000) + 1;
+        int id = (rand() % 10000) + 1;
 
-        int randomNumber = (qrand() % 10000) + 1;
+        int randomNumber = (rand() % 10000) + 1;
 
         array.emplace_back(picojson::object({
                             {"id", picojson::value(double(id))},
                             {"randomNumber", picojson::value(double(randomNumber))}
                         }));
 
-        db.exec(APreparedQueryLiteral(u"SELECT randomNumber, id FROM world WHERE id=$1"),
-                               {id}, [c, async] (AResult &result) {
+        db.exec(APreparedQueryLiteral(u8"SELECT randomNumber, id FROM world WHERE id=$1"),
+                               {id}, c, [c, async] (AResult &result) {
             if (Q_UNLIKELY(result.error() || !result.size())) {
                 c->res()->setStatus(Response::InternalServerError);
                 return;
             }
-        }, c);
-        db.exec(APreparedQueryLiteral(u"UPDATE world SET randomNumber=$1 WHERE id=$2"),
-                               {randomNumber, id}, [c, async] (AResult &result) {
+        });
+        db.exec(APreparedQueryLiteral(u8"UPDATE world SET randomNumber=$1 WHERE id=$2"),
+                               {randomNumber, id}, c, [c, async] (AResult &result) {
             if (Q_UNLIKELY(result.error())) {
                 c->res()->setStatus(Response::InternalServerError);
                 return;
             }
-        }, c);
+        });
     }
 
     c->response()->setJsonBody(QByteArray::fromStdString(picojson::value(array).serialize()));
@@ -78,9 +80,9 @@ void DatabaseUpdatesTest::updateb(Context *c)
     ASync async(c);
     static thread_local auto db = APool::database();
     for (int i = 0; i < queries; ++i) {
-        int id = (qrand() % 10000) + 1;
+        int id = (rand() % 10000) + 1;
 
-        int randomNumber = (qrand() % 10000) + 1;
+        int randomNumber = (rand() % 10000) + 1;
 
         argsIds.append(id);
         args.append(id);
@@ -91,23 +93,23 @@ void DatabaseUpdatesTest::updateb(Context *c)
                             {"randomNumber", picojson::value(double(randomNumber))}
                         }));
 
-        db.exec(APreparedQueryLiteral(u"SELECT randomNumber, id FROM world WHERE id=$1"),
-                               {id}, [c, async] (AResult &result) {
+        db.exec(APreparedQueryLiteral(u8"SELECT randomNumber, id FROM world WHERE id=$1"),
+                               {id}, c, [c, async] (AResult &result) {
             if (Q_UNLIKELY(result.error() || !result.size())) {
                 c->res()->setStatus(Response::InternalServerError);
                 return;
             }
-        }, c);
+        });
     }
     args.append(argsIds);
 
     const APreparedQuery pq = getSql(queries);
-    db.exec(pq, args, [c, async] (AResult &result) {
+    db.exec(pq, args, c, [c, async] (AResult &result) {
         if (Q_UNLIKELY(result.error())) {
             c->res()->setStatus(Response::InternalServerError);
             return;
         }
-    }, c);
+    });
 
     c->response()->setJsonBody(QByteArray::fromStdString(picojson::value(array).serialize()));
 }
@@ -149,7 +151,7 @@ void DatabaseUpdatesTest::processQuery(Context *c, QSqlQuery &query, QSqlQuery &
     ids.reserve(queries);
     randomNumbers.reserve(queries);
     for (int i = 0; i < queries; ++i) {
-        int id = (qrand() % 10000) + 1;
+        int id = (rand() % 10000) + 1;
 
         query.bindValue(QStringLiteral(":id"), id);
         if (Q_UNLIKELY(!query.exec() || !query.next())) {
@@ -157,7 +159,7 @@ void DatabaseUpdatesTest::processQuery(Context *c, QSqlQuery &query, QSqlQuery &
             return;
         }
 
-        int randomNumber = (qrand() % 10000) + 1;
+        int randomNumber = (rand() % 10000) + 1;
         ids.append(id);
         randomNumbers.append(randomNumber);
 
@@ -179,29 +181,28 @@ void DatabaseUpdatesTest::processQuery(Context *c, QSqlQuery &query, QSqlQuery &
 APreparedQuery DatabaseUpdatesTest::getSql(int count)
 {
     auto iter = m_sqlMap.find(count);
-    if (iter != m_sqlMap.end())
-    {
-        return iter.value();
-    }
-    QString sql = QStringLiteral("UPDATE WORLD SET randomnumber=CASE id ");
-    sql.reserve(80 + count * 25);
-    int placeholdersCounter = 1;
-    for (int i = 0; i < count; i++) {
-        sql.append(QStringLiteral("WHEN $%1 THEN $%2 ").arg(placeholdersCounter).arg(placeholdersCounter + 1));
-        placeholdersCounter += 2;
-    }
-    sql.append(QStringLiteral("ELSE randomnumber END WHERE id IN ("));
+    if (Q_UNLIKELY(iter == m_sqlMap.end())) {
+        QString sql = QStringLiteral("UPDATE WORLD SET randomnumber=CASE id ");
+        sql.reserve(80 + count * 25);
+        int placeholdersCounter = 1;
+        for (int i = 0; i < count; i++) {
+            sql.append(QStringLiteral("WHEN $%1 THEN $%2 ").arg(placeholdersCounter).arg(placeholdersCounter + 1));
+            placeholdersCounter += 2;
+        }
+        sql.append(QStringLiteral("ELSE randomnumber END WHERE id IN ("));
 
-    for (int i = 0; i < count; i++) {
-        sql.append(QLatin1Char('$') + QString::number(placeholdersCounter) + QLatin1Char(','));
-        ++placeholdersCounter;
-    }
+        for (int i = 0; i < count; i++) {
+            sql.append(QLatin1Char('$') + QString::number(placeholdersCounter) + QLatin1Char(','));
+            ++placeholdersCounter;
+        }
+
+        if (count) {
+            sql.remove(sql.size() - 1, 1);
+        }
+        sql.append(QLatin1Char(')'));
 
-    if (count) {
-        sql.remove(sql.size() - 1, 1);
+        iter = m_sqlMap.insert(count, APreparedQuery(sql));
     }
-    sql.append(QLatin1Char(')'));
-    m_sqlMap.insert(count, sql);
 
-    return sql;
+    return iter.value();
 }
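
To make the restructured `getSql()` easier to follow: it builds a single batched UPDATE whose placeholders are the `(id, randomNumber)` pairs for the CASE arms, followed by the ids again for the IN list, matching the order in which `updateb()` appends values to `args`. For illustration, the statement text cached for `count == 2` comes out as:

    // Produced by getSql(2): $1..$4 are the CASE pairs, $5 and $6 the id list.
    const char *sqlForCount2 =
        "UPDATE WORLD SET randomnumber=CASE id "
        "WHEN $1 THEN $2 WHEN $3 THEN $4 "
        "ELSE randomnumber END WHERE id IN ($5,$6)";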

+ 4 - 4
frameworks/C++/cutelyst/src/databaseupdatestest.h

@@ -20,17 +20,17 @@ public:
     C_ATTR(updateb, :Local :AutoArgs)
     void updateb(Context *c);
 
-    C_ATTR(updates_postgres, :Local :AutoArgs)
+    C_ATTR(updates_postgres, :Path('ups_QPG') :AutoArgs)
     void updates_postgres(Context *c);
 
-    C_ATTR(updates_mysql, :Local :AutoArgs)
+    C_ATTR(updates_mysql, :Path('ups_QMY') :AutoArgs)
     void updates_mysql(Context *c);
 
 private:
     inline void processQuery(Context *c, QSqlQuery &query, QSqlQuery &updateQuery);
-    inline APreparedQuery getSql(int count);
+    inline ASql::APreparedQuery getSql(int count);
 
-    QMap<int, APreparedQuery> m_sqlMap;
+    QMap<int, ASql::APreparedQuery> m_sqlMap;
 };
 
 #endif // DATABASEUPDATESTEST_H

+ 6 - 4
frameworks/C++/cutelyst/src/fortunetest.cpp

@@ -9,6 +9,8 @@
 
 #include <QSqlQuery>
 
+using namespace ASql;
+
 FortuneTest::FortuneTest(QObject *parent) : Controller(parent)
 {
 
@@ -18,7 +20,7 @@ void FortuneTest::fortunes_raw_p(Context *c)
 {
     ASync async(c);
     static thread_local auto db = APool::database();
-    db.exec(APreparedQueryLiteral(u"SELECT id, message FROM fortune"), [c, async, this] (AResult &result) {
+    db.exec(APreparedQueryLiteral(u8"SELECT id, message FROM fortune"), c, [c, async, this] (AResult &result) {
         if (Q_UNLIKELY(result.error() && !result.size())) {
             c->res()->setStatus(Response::InternalServerError);
             return;
@@ -38,7 +40,7 @@ void FortuneTest::fortunes_raw_p(Context *c)
         });
 
         renderRaw(c, fortunes);
-    }, c);
+    });
 }
 
 void FortuneTest::fortunes_raw_postgres(Context *c)
@@ -63,7 +65,7 @@ void FortuneTest::fortunes_c_p(Context *c)
 {
     ASync async(c);
     static thread_local auto db = APool::database();
-    db.exec(APreparedQueryLiteral(u"SELECT id, message FROM fortune"), [c, async] (AResult &result) {
+    db.exec(APreparedQueryLiteral(u8"SELECT id, message FROM fortune"), c, [c, async] (AResult &result) {
         if (Q_UNLIKELY(result.error() && !result.size())) {
             c->res()->setStatus(Response::InternalServerError);
             return;
@@ -91,7 +93,7 @@ void FortuneTest::fortunes_c_p(Context *c)
         static thread_local View *view = c->view();
         view->execute(c);
         c->response()->setContentType(QStringLiteral("text/html; charset=UTF-8"));
-    }, c);
+    });
 }
 
 void FortuneTest::fortunes_cutelee_postgres(Context *c)

+ 6 - 6
frameworks/C++/cutelyst/src/fortunetest.h

@@ -19,22 +19,22 @@ class FortuneTest : public Controller
 public:
     explicit FortuneTest(QObject *parent = 0);
 
-    C_ATTR(fortunes_raw_p, :Local :AutoArgs)
+    C_ATTR(fortunes_raw_p, :Path('f_RW_APG') :AutoArgs)
     void fortunes_raw_p(Context *c);
 
-    C_ATTR(fortunes_raw_postgres, :Local :AutoArgs)
+    C_ATTR(fortunes_raw_postgres, :Path('f_RW_QPG') :AutoArgs)
     void fortunes_raw_postgres(Context *c);
 
-    C_ATTR(fortunes_raw_mysql, :Local :AutoArgs)
+    C_ATTR(fortunes_raw_mysql, :Path('f_RW_QMY') :AutoArgs)
     void fortunes_raw_mysql(Context *c);
 
-    C_ATTR(fortunes_c_p, :Local :AutoArgs)
+    C_ATTR(fortunes_c_p, :Path('f_CL_APG') :AutoArgs)
     void fortunes_c_p(Context *c);
 
-    C_ATTR(fortunes_cutelee_postgres, :Local :AutoArgs)
+    C_ATTR(fortunes_cutelee_postgres, :Path('f_CL_QPG') :AutoArgs)
     void fortunes_cutelee_postgres(Context *c);
 
-    C_ATTR(fortunes_cutelee_mysql, :Local :AutoArgs)
+    C_ATTR(fortunes_cutelee_mysql, :Path('f_CL_QMY') :AutoArgs)
     void fortunes_cutelee_mysql(Context *c);
 
 private:

+ 7 - 5
frameworks/C++/cutelyst/src/multipledatabasequeriestest.cpp

@@ -12,6 +12,8 @@
 #include <QJsonObject>
 #include <QJsonArray>
 
+using namespace ASql;
+
 MultipleDatabaseQueriesTest::MultipleDatabaseQueriesTest(QObject *parent) : Controller(parent)
 {
 
@@ -30,10 +32,10 @@ void MultipleDatabaseQueriesTest::queriesp(Context *c)
     ASync async(c);
     static thread_local auto db = APool::database();
     for (int i = 0; i < queries; ++i) {
-        const int id = (qrand() % 10000) + 1;
+        const int id = (rand() % 10000) + 1;
 
-        db.exec(APreparedQueryLiteral(u"SELECT id, randomNumber FROM world WHERE id=$1"),
-                               {id}, [c, async, i, queries, array] (AResult &result) {
+        db.exec(APreparedQueryLiteral(u8"SELECT id, randomNumber FROM world WHERE id=$1"),
+                               {id}, c, [c, async, i, queries, array] (AResult &result) {
             if (Q_LIKELY(!result.error() && result.size())) {
                 auto it = result.begin();
                 array->append(QJsonObject{
@@ -48,7 +50,7 @@ void MultipleDatabaseQueriesTest::queriesp(Context *c)
             }
 
             c->res()->setStatus(Response::InternalServerError);
-        }, c);
+        });
     }
 }
 
@@ -80,7 +82,7 @@ void MultipleDatabaseQueriesTest::processQuery(Context *c, QSqlQuery &query)
     }
 
     for (int i = 0; i < queries; ++i) {
-        const int id = (qrand() % 10000) + 1;
+        const int id = (rand() % 10000) + 1;
 
         query.bindValue(QStringLiteral(":id"), id);
         if (Q_LIKELY(query.exec() && query.next())) {

+ 3 - 3
frameworks/C++/cutelyst/src/multipledatabasequeriestest.h

@@ -13,13 +13,13 @@ class MultipleDatabaseQueriesTest : public Controller
 public:
     explicit MultipleDatabaseQueriesTest(QObject *parent = 0);
 
-    C_ATTR(queriesp, :Local :AutoArgs)
+    C_ATTR(queriesp, :Path('querAPG') :AutoArgs)
     void queriesp(Context *c);
 
-    C_ATTR(query_postgres, :Local :AutoArgs)
+    C_ATTR(query_postgres, :Path('queryPG') :AutoArgs)
     void query_postgres(Context *c);
 
-    C_ATTR(query_mysql, :Local :AutoArgs)
+    C_ATTR(query_mysql, :Path('queryMY') :AutoArgs)
     void query_mysql(Context *c);
 
 private:

+ 0 - 37
frameworks/C++/cutelyst/src/root.cpp

@@ -1,37 +0,0 @@
-#include "root.h"
-
-#include <QElapsedTimer>
-
-using namespace Cutelyst;
-
-Root::Root(QObject *parent) : Controller(parent)
-{
-}
-
-Root::~Root()
-{
-}
-
-QElapsedTimer timerSetup(Context *c)
-{
-    QElapsedTimer timer;
-    timer.start();
-    return timer;
-}
-
-QString setupHeader(Context *c)
-{
-    return c->response()->headers().setDateWithDateTime(QDateTime::currentDateTimeUtc());
-}
-
-void Root::End(Context *c)
-{
-    static thread_local QString lastDate = setupHeader(c);
-    static thread_local QElapsedTimer timer = timerSetup(c);
-    if (timer.hasExpired(1000)) {
-        lastDate = setupHeader(c);
-        timer.restart();
-    } else {
-        c->response()->setHeader(QStringLiteral("date"), lastDate);
-    }
-}

+ 0 - 22
frameworks/C++/cutelyst/src/root.h

@@ -1,22 +0,0 @@
-#ifndef ROOT_H
-#define ROOT_H
-
-#include <Cutelyst/Controller>
-
-using namespace Cutelyst;
-
-class Root : public Controller
-{
-    Q_OBJECT
-    C_NAMESPACE("")
-public:
-    explicit Root(QObject *parent = 0);
-    ~Root();
-
-private:
-    C_ATTR(End, :AutoArgs)
-    void End(Context *c);
-};
-
-#endif //ROOT_H
-

+ 39 - 6
frameworks/C++/cutelyst/src/singledatabasequerytest.cpp

@@ -10,22 +10,55 @@
 
 #include <QJsonDocument>
 #include <QJsonObject>
+#include <QTimer>
 
 #include "picojson.h"
 
+using namespace ASql;
+
 SingleDatabaseQueryTest::SingleDatabaseQueryTest(QObject *parent) : Controller(parent)
 {
 
 }
 
-void SingleDatabaseQueryTest::dbp(Context *c)
+void SingleDatabaseQueryTest::db_asql_pg(Context *c)
 {
-    const int id = (qrand() % 10000) + 1;
+    const int id = (rand() % 10000) + 1;
 
     ASync async(c);
     static thread_local auto db = APool::database();
-    db.exec(APreparedQueryLiteral(u"SELECT id, randomNumber FROM world WHERE id=$1"),
-                           {id}, [c, async] (AResult &result) {
+
+    db.exec(APreparedQueryLiteral(u8"SELECT id, randomNumber FROM world WHERE id=$1"),
+                           {id}, c, [c, async] (AResult &result) {
+        if (Q_LIKELY(!result.error() && result.size())) {
+            auto it = result.begin();
+            c->response()->setJsonBody(QByteArray::fromStdString(
+                            picojson::value(picojson::object({
+                                                {"id", picojson::value(double(it[0].toInt()))},
+                                                {"randomNumber", picojson::value(double(it[1].toInt()))}
+                                            })).serialize()));
+            return;
+        }
+
+        c->res()->setStatus(Response::InternalServerError);
+    });
+}
+
+ADatabase getPipelineEnabledDatabase()
+{
+    auto db = APool::database();
+    db.enterPipelineMode(300);
+    return db;
+}
+
+void SingleDatabaseQueryTest::db_asql_pipeline_pg(Context *c)
+{
+    const int id = (rand() % 10000) + 1;
+
+    ASync async(c);
+    static thread_local auto db = getPipelineEnabledDatabase();
+    db.exec(APreparedQueryLiteral(u8"SELECT id, randomNumber FROM world WHERE id=$1"),
+                           {id}, c, [c, async] (AResult &result) {
         if (Q_LIKELY(!result.error() && result.size())) {
             auto it = result.begin();
             c->response()->setJsonBody(QByteArray::fromStdString(
@@ -37,7 +70,7 @@ void SingleDatabaseQueryTest::dbp(Context *c)
         }
 
         c->res()->setStatus(Response::InternalServerError);
-    }, c);
+    });
 }
 
 void SingleDatabaseQueryTest::db_postgres(Context *c)
@@ -58,7 +91,7 @@ void SingleDatabaseQueryTest::db_mysql(Context *c)
 
 void SingleDatabaseQueryTest::processQuery(Context *c, QSqlQuery &query)
 {
-    int id = (qrand() % 10000) + 1;
+    int id = (rand() % 10000) + 1;
 
     query.bindValue(QStringLiteral(":id"), id);
     if (Q_UNLIKELY(!query.exec() || !query.next())) {

+ 7 - 4
frameworks/C++/cutelyst/src/singledatabasequerytest.h

@@ -13,13 +13,16 @@ class SingleDatabaseQueryTest : public Controller
 public:
     explicit SingleDatabaseQueryTest(QObject *parent = 0);
 
-    C_ATTR(dbp, :Local :AutoArgs)
-    void dbp(Context *c);
+    C_ATTR(db_asql_pg, :Path('pg') :AutoArgs)
+    void db_asql_pg(Context *c);
 
-    C_ATTR(db_postgres, :Local :AutoArgs)
+    C_ATTR(db_asql_pipeline_pg, :Path('Pg') :AutoArgs)
+    void db_asql_pipeline_pg(Context *c);
+
+    C_ATTR(db_postgres, :Path('PG') :AutoArgs)
     void db_postgres(Context *c);
 
-    C_ATTR(db_mysql, :Local :AutoArgs)
+    C_ATTR(db_mysql, :Path('MY') :AutoArgs)
     void db_mysql(Context *c);
 
 private:

+ 6 - 6
frameworks/C++/suil/benchmark/src/main.cpp

@@ -58,19 +58,19 @@ int main(int argc, char *argv[])
 
     Endpoint<SystemAttrs, PgSqlMiddleware> ep{"/",
           opt(serverConfig, std::move(config)),
-          opt(numberOfWorkers, 0)   /* Will run with number of available cores */
+          opt(connectionTimeout, 60_sec)
     };
 
     ep.middleware<PgSqlMiddleware>().setup(
             suil::env("POSTGRES_CONN", DEFAULT_POSTGRES_CONN),
-            opt(ASYNC,   true),   // connections are async
-            opt(TIMEOUT, 5_sec),  // timeout on db transactions
-            opt(EXPIRES, 30_sec)  // connections are cached for 30 seconds
+            opt(ASYNC,   true),    // connections are async
+            opt(TIMEOUT, 10_sec),  // timeout on db transactions
+            opt(EXPIRES, 30_sec)   // connections are cached for 30 seconds
     );
 
 #if SUIL_BENCH_DEV == 1
     {
-        scoped(conn, ep.middleware<PgSqlMiddleware>().conn());
+        scoped(conn, ep.middleware<PgSqlMiddleware>().conn(false));
         seedDatabase(conn);
     }
 #endif
@@ -152,4 +152,4 @@ int main(int argc, char *argv[])
     });
 
     return ep.start();
-}
+}

+ 1 - 1
frameworks/C++/suil/suil.dockerfile

@@ -2,7 +2,7 @@ FROM suilteam/base:alpine
 
 COPY ./ suil-bench
 
-ENV SUIL_VERSION=0.1.0
+ENV SUIL_VERSION=0.1.1
 ENV SUIL_VERSION_TAG=alpha
 ENV SUIL_CONFIGURATION=Release
 

+ 6 - 6
frameworks/C++/treefrog/config/application.ini

@@ -138,7 +138,7 @@ MPM.thread.MaxAppServers=
 # Maximum number of action threads allowed to start simultaneously
 # per server process. Set max_connections parameter of the DBMS
 # to (MaxAppServers * MaxThreadsPerAppServer) or more.
-MPM.thread.MaxThreadsPerAppServer=100
+MPM.thread.MaxThreadsPerAppServer=128
 
 ##
 ## MPM epoll section
@@ -241,14 +241,14 @@ ActionMailer.smtp.DelayedDelivery=false
 # Comment out the following line.
 Cache.SettingsFile=cache.ini
 
-# Specify the cache backend, such as 'sqlite', 'mongodb'
-# or 'redis'.
-Cache.Backend=sqlite
+# Specify the cache backend, such as 'sqlite', 'mongodb', 'redis' or
+# 'memory'.
+Cache.Backend=memory
 
 # Probability of starting garbage collection (GC) for cache.
 # If 100 is specified, GC will be started at a rate of once per 100
 # sets. If 0 is specified, the GC never starts.
-Cache.GcProbability=10000000
+Cache.GcProbability=100000000
 
 # If true, enable LZ4 compression when storing data.
-Cache.EnableCompression=no
+Cache.EnableCompression=false

+ 10 - 10
frameworks/C++/treefrog/treefrog-epoll.dockerfile

@@ -1,21 +1,21 @@
-FROM buildpack-deps:focal
+FROM buildpack-deps:jammy
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_NOWARNINGS yes
-ENV TFVER=2.2.0
+ENV TFVER=2.5.0
 
-RUN apt-get update -yqq && apt-get upgrade -yq && apt-get install -yqq --no-install-recommends \
-    software-properties-common unzip wget make cmake gcc clang libjemalloc-dev qt5-qmake qt5-default qtbase5-dev \
-    qtbase5-dev-tools libqt5sql5 libqt5sql5-mysql libqt5sql5-psql libqt5qml5 libqt5xml5 \
-    qtdeclarative5-dev libqt5quick5 libqt5quickparticles5 libqt5gui5 libqt5printsupport5 \
-    libqt5widgets5 libqt5opengl5-dev libqt5quicktest5 libqt5sql5-sqlite libsqlite3-dev libmongoc-dev libbson-dev \
-    redis-server
+RUN apt-get update -yqq && apt-get upgrade -yq && \
+    apt-get install -yqq --no-install-recommends software-properties-common unzip wget libjemalloc-dev \
+    qmake6 qt6-base-dev qt6-base-dev-tools qt6-tools-dev-tools qt6-declarative-dev libqt6sql6-mysql \
+    libqt6sql6-psql libqt6sql6-odbc libqt6sql6-sqlite libqt6core6 libqt6qml6 libqt6xml6 libpq5 libodbc1 \
+    libmongoc-dev libbson-dev gcc g++ clang make cmake pkg-config redis-server
+RUN rm -f /usr/bin/qmake; ln -sf /usr/bin/qmake6 /usr/bin/qmake
 
 WORKDIR /usr/src
 RUN wget -q https://github.com/treefrogframework/treefrog-framework/archive/v${TFVER}.tar.gz
 RUN tar xf v${TFVER}.tar.gz
 RUN cd treefrog-framework-${TFVER} && \
-    ./configure --spec=linux-clang && \
+    ./configure --enable-shared-mongoc --spec=linux-clang && \
     cd src && \
     make -j4 && \
     make install && \
@@ -37,4 +37,4 @@ RUN sed -i 's|MultiProcessingModule=.*|MultiProcessingModule=epoll|g' config/app
 EXPOSE 8080
 
 # 3. Start TreeFrog
-CMD treefrog /workspace
+CMD treefrog /workspace

+ 9 - 9
frameworks/C++/treefrog/treefrog-mongodb.dockerfile

@@ -1,21 +1,21 @@
-FROM buildpack-deps:focal
+FROM buildpack-deps:jammy
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_NOWARNINGS yes
-ENV TFVER=2.2.0
+ENV TFVER=2.5.0
 
-RUN apt-get update -yqq && apt-get upgrade -yq && apt-get install -yqq --no-install-recommends \
-    software-properties-common unzip wget make cmake gcc clang libjemalloc-dev qt5-qmake qt5-default qtbase5-dev \
-    qtbase5-dev-tools libqt5sql5 libqt5sql5-mysql libqt5sql5-psql libqt5qml5 libqt5xml5 \
-    qtdeclarative5-dev libqt5quick5 libqt5quickparticles5 libqt5gui5 libqt5printsupport5 \
-    libqt5widgets5 libqt5opengl5-dev libqt5quicktest5 libqt5sql5-sqlite libsqlite3-dev libmongoc-dev libbson-dev \
-    redis-server
+RUN apt-get update -yqq && apt-get upgrade -yq && \
+    apt-get install -yqq --no-install-recommends software-properties-common unzip wget libjemalloc-dev \
+    qmake6 qt6-base-dev qt6-base-dev-tools qt6-tools-dev-tools qt6-declarative-dev libqt6sql6-mysql \
+    libqt6sql6-psql libqt6sql6-odbc libqt6sql6-sqlite libqt6core6 libqt6qml6 libqt6xml6 libpq5 libodbc1 \
+    libmongoc-dev libbson-dev gcc g++ clang make cmake pkg-config redis-server
+RUN rm -f /usr/bin/qmake; ln -sf /usr/bin/qmake6 /usr/bin/qmake
 
 WORKDIR /usr/src
 RUN wget -q https://github.com/treefrogframework/treefrog-framework/archive/v${TFVER}.tar.gz
 RUN tar xf v${TFVER}.tar.gz
 RUN cd treefrog-framework-${TFVER} && \
-    ./configure --spec=linux-clang && \
+    ./configure --enable-shared-mongoc --spec=linux-clang && \
     cd src && \
     make -j4 && \
     make install && \

+ 9 - 9
frameworks/C++/treefrog/treefrog-mysql.dockerfile

@@ -1,21 +1,21 @@
-FROM buildpack-deps:focal
+FROM buildpack-deps:jammy
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_NOWARNINGS yes
-ENV TFVER=2.2.0
+ENV TFVER=2.5.0
 
-RUN apt-get update -yqq && apt-get upgrade -yq && apt-get install -yqq --no-install-recommends \
-    software-properties-common unzip wget make cmake gcc clang libjemalloc-dev qt5-qmake qt5-default qtbase5-dev \
-    qtbase5-dev-tools libqt5sql5 libqt5sql5-mysql libqt5sql5-psql libqt5qml5 libqt5xml5 \
-    qtdeclarative5-dev libqt5quick5 libqt5quickparticles5 libqt5gui5 libqt5printsupport5 \
-    libqt5widgets5 libqt5opengl5-dev libqt5quicktest5 libqt5sql5-sqlite libsqlite3-dev libmongoc-dev libbson-dev \
-    redis-server
+RUN apt-get update -yqq && apt-get upgrade -yq && \
+    apt-get install -yqq --no-install-recommends software-properties-common unzip wget libjemalloc-dev \
+    qmake6 qt6-base-dev qt6-base-dev-tools qt6-tools-dev-tools qt6-declarative-dev libqt6sql6-mysql \
+    libqt6sql6-psql libqt6sql6-odbc libqt6sql6-sqlite libqt6core6 libqt6qml6 libqt6xml6 libpq5 libodbc1 \
+    libmongoc-dev libbson-dev gcc g++ clang make cmake pkg-config redis-server
+RUN rm -f /usr/bin/qmake; ln -sf /usr/bin/qmake6 /usr/bin/qmake
 
 WORKDIR /usr/src
 RUN wget -q https://github.com/treefrogframework/treefrog-framework/archive/v${TFVER}.tar.gz
 RUN tar xf v${TFVER}.tar.gz
 RUN cd treefrog-framework-${TFVER} && \
-    ./configure --spec=linux-clang && \
+    ./configure --enable-shared-mongoc --spec=linux-clang && \
     cd src && \
     make -j4 && \
     make install && \

+ 9 - 9
frameworks/C++/treefrog/treefrog.dockerfile

@@ -1,21 +1,21 @@
-FROM buildpack-deps:focal
+FROM buildpack-deps:jammy
 
 ENV DEBIAN_FRONTEND noninteractive
 ENV DEBCONF_NOWARNINGS yes
-ENV TFVER=2.2.0
+ENV TFVER=2.5.0
 
-RUN apt-get update -yqq && apt-get upgrade -yq && apt-get install -yqq --no-install-recommends \
-    software-properties-common unzip wget make cmake gcc clang libjemalloc-dev qt5-qmake qt5-default qtbase5-dev \
-    qtbase5-dev-tools libqt5sql5 libqt5sql5-mysql libqt5sql5-psql libqt5qml5 libqt5xml5 \
-    qtdeclarative5-dev libqt5quick5 libqt5quickparticles5 libqt5gui5 libqt5printsupport5 \
-    libqt5widgets5 libqt5opengl5-dev libqt5quicktest5 libqt5sql5-sqlite libsqlite3-dev libmongoc-dev libbson-dev \
-    redis-server
+RUN apt-get update -yqq && apt-get upgrade -yq && \
+    apt-get install -yqq --no-install-recommends software-properties-common unzip wget libjemalloc-dev \
+    qmake6 qt6-base-dev qt6-base-dev-tools qt6-tools-dev-tools qt6-declarative-dev libqt6sql6-mysql \
+    libqt6sql6-psql libqt6sql6-odbc libqt6sql6-sqlite libqt6core6 libqt6qml6 libqt6xml6 libpq5 libodbc1 \
+    libmongoc-dev libbson-dev gcc g++ clang make cmake pkg-config redis-server
+RUN rm -f /usr/bin/qmake; ln -sf /usr/bin/qmake6 /usr/bin/qmake
 
 WORKDIR /usr/src
 RUN wget -q https://github.com/treefrogframework/treefrog-framework/archive/v${TFVER}.tar.gz
 RUN tar xf v${TFVER}.tar.gz
 RUN cd treefrog-framework-${TFVER} && \
-    ./configure --spec=linux-clang && \
+    ./configure --enable-shared-mongoc --spec=linux-clang && \
     cd src && \
     make -j4 && \
     make install && \

+ 23 - 11
frameworks/C/h2o/CMakeLists.txt

@@ -1,22 +1,34 @@
-cmake_minimum_required(VERSION 3.16.0)
+cmake_minimum_required(VERSION 3.18.0)
 project(h2o_app)
-find_library(H2O_LIB h2o-evloop)
-find_library(MUSTACHE_C_LIB mustache_c)
-find_library(YAJL_LIB yajl)
-find_path(H2O_INCLUDE h2o.h)
-find_path(MUSTACHE_C_INCLUDE mustache.h)
-find_path(YAJL_INCLUDE yajl/yajl_gen.h)
+find_library(CRYPTO_LIB crypto REQUIRED)
+find_library(H2O_LIB h2o-evloop REQUIRED)
+find_library(MUSTACHE_C_LIB mustache_c REQUIRED)
+find_library(NUMA_LIB numa REQUIRED)
+find_library(PQ_LIB pq REQUIRED)
+find_library(SSL_LIB ssl REQUIRED)
+find_library(YAJL_LIB yajl REQUIRED)
+find_library(Z_LIB z REQUIRED)
+find_path(H2O_INCLUDE h2o.h REQUIRED)
+find_path(MUSTACHE_C_INCLUDE mustache.h REQUIRED)
+find_path(NUMA_INCLUDE numaif.h REQUIRED)
+find_path(OPENSSL_INCLUDE openssl/ssl.h REQUIRED)
+find_path(PQ_INCLUDE postgresql/libpq-fe.h REQUIRED)
+find_path(YAJL_INCLUDE yajl/yajl_gen.h REQUIRED)
+include_directories(src ${H2O_INCLUDE} ${MUSTACHE_C_INCLUDE} ${NUMA_INCLUDE} ${OPENSSL_INCLUDE})
+include_directories(${PQ_INCLUDE} ${YAJL_INCLUDE})
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED ON)
+add_compile_definitions(H2O_USE_LIBUV=0)
 set(COMMON_OPTIONS -flto -pthread)
-add_compile_options(-std=gnu11 -pedantic -Wall -Wextra ${COMMON_OPTIONS})
+add_compile_options(-pedantic -Wall -Wextra ${COMMON_OPTIONS})
 set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -D_FORTIFY_SOURCE=2")
 set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -O3")
 set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3")
-add_definitions(-DH2O_USE_LIBUV=0)
-include_directories(src ${H2O_INCLUDE} ${MUSTACHE_C_INCLUDE} ${YAJL_INCLUDE})
 file(GLOB_RECURSE SOURCES "src/*.c")
 add_executable(${PROJECT_NAME} ${SOURCES})
 target_link_libraries(${PROJECT_NAME} ${COMMON_OPTIONS})
-target_link_libraries(${PROJECT_NAME} ${H2O_LIB} ssl crypto numa pq z ${MUSTACHE_C_LIB} ${YAJL_LIB})
+target_link_libraries(${PROJECT_NAME} ${H2O_LIB} ${MUSTACHE_C_LIB} ${NUMA_LIB} ${PQ_LIB} ${SSL_LIB})
+target_link_libraries(${PROJECT_NAME} ${CRYPTO_LIB} ${YAJL_LIB} ${Z_LIB})
 install(TARGETS ${PROJECT_NAME} RUNTIME DESTINATION bin)
 file(GLOB TEMPLATES "template/*")
 install(FILES ${TEMPLATES} DESTINATION share/${PROJECT_NAME}/template)

+ 11 - 12
frameworks/C/h2o/README.md

@@ -1,29 +1,28 @@
 # h2o
 
-This is a framework implementation using the [H2O](https://h2o.examp1e.net) HTTP server. It builds directly on top of `libh2o` instead of running the standalone server.
+This is a framework implementation using the [H2O](https://h2o.examp1e.net) HTTP server. It
+builds directly on top of `libh2o` instead of running the standalone server.
 
 ## Requirements
 
-[CMake](https://cmake.org), [H2O](https://h2o.examp1e.net), [libpq](https://www.postgresql.org), [mustache-c](https://github.com/x86-64/mustache-c), [OpenSSL](https://www.openssl.org), [YAJL](https://lloyd.github.io/yajl)
+[CMake](https://cmake.org), [H2O](https://h2o.examp1e.net), [libpq](https://www.postgresql.org),
+[mustache-c](https://github.com/x86-64/mustache-c), [numactl](https://github.com/numactl/numactl),
+[OpenSSL](https://www.openssl.org), [YAJL](https://lloyd.github.io/yajl)
 
 ## Test implementations
 
-The test implementations are located into the `src/handlers` directory.
+The test implementations are located into the `src/handlers` directory - refer to the
+`initialize_*_handler*()` functions.
 
 ## Performance tuning
 
-If the test environment changes, it will probably be necessary to tune some of the framework settings in order to achieve the best performance possible. The most significant parameter is the maximum number of database connections per thread, which is controlled by the `DB_CONN` variable in the `h2o.sh` script.
+If the test environment changes, it will probably be necessary to tune some of the framework
+settings in order to achieve the best performance possible. The most significant parameter is
+the maximum number of database connections per thread, which is controlled by the `DB_CONN`
+variable in the `h2o.sh` script.
 
 ## Performance issues
 
-### Database tests
-
-`libpq` does not support command pipelining, and implementing anything equivalent on top of it conflicts with the requirements.
-
-### Database updates
-
-In the Citrine environment the database connection settings that improve the performance on the updates test make the other database results worse, and vice versa.
-
 ### Plaintext
 
 `libh2o` performs at least one system call per pipelined response.
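
The deleted "Database tests" note used to say that `libpq` offers no command pipelining. The rewritten `database.c` in this commit (see the new `EXPECT_SYNC` flag and the `max_pipeline_query_num` limit) appears to be built around the pipeline mode that libpq gained with PostgreSQL 14. A minimal standalone sketch of that libpq feature, independent of the h2o integration here; the connection string and queries are placeholders, and real code such as this framework would drive it from a non-blocking event loop:

    #include <libpq-fe.h>  // or <postgresql/libpq-fe.h>, depending on the install layout

    int main()
    {
        PGconn *conn = PQconnectdb("dbname=hello_world");  // placeholder conninfo
        if (PQstatus(conn) != CONNECTION_OK || !PQenterPipelineMode(conn))
            return 1;

        // Queue two queries without waiting for either result; pipeline mode
        // requires the extended protocol, hence PQsendQueryParams().
        PQsendQueryParams(conn, "SELECT 1", 0, nullptr, nullptr, nullptr, nullptr, 0);
        PQsendQueryParams(conn, "SELECT 2", 0, nullptr, nullptr, nullptr, nullptr, 0);
        PQpipelineSync(conn);  // delimit the batch and flush it to the server

        // Drain results: each query's results end with a NULL, and the sync point
        // itself is reported as a PGRES_PIPELINE_SYNC result.
        for (bool synced = false; !synced && PQstatus(conn) != CONNECTION_BAD; ) {
            PGresult *res = PQgetResult(conn);
            if (!res)
                continue;  // separator between the result sets of queued queries
            if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
                synced = true;
            PQclear(res);
        }

        PQexitPipelineMode(conn);
        PQfinish(conn);
        return 0;
    }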

+ 2 - 2
frameworks/C/h2o/h2o.dockerfile

@@ -19,7 +19,7 @@ RUN mkdir -p "$MUSTACHE_C_BUILD_DIR" && \
     cd "$MUSTACHE_C_BUILD_DIR" && \
     wget -qO - "https://github.com/x86-64/mustache-c/archive/${MUSTACHE_C_REVISION}.tar.gz" | \
     tar xz --strip-components=1 && \
-    CFLAGS="-O3 -flto -march=native" ./autogen.sh --prefix="$MUSTACHE_C_PREFIX" && \
+    CFLAGS="-O3 -flto -march=native -mtune=native" ./autogen.sh --prefix="$MUSTACHE_C_PREFIX" && \
     make -j "$(nproc)" install && \
     cd .. && \
     rm -rf "$MUSTACHE_C_BUILD_DIR"
@@ -36,7 +36,7 @@ RUN mkdir -p "${H2O_BUILD_DIR}/build" && \
     wget -qO - "https://github.com/h2o/h2o/archive/${H2O_VERSION}.tar.gz" | \
     tar xz --strip-components=1 && \
     cd build && \
-    cmake -DCMAKE_INSTALL_PREFIX="$H2O_PREFIX" -DCMAKE_C_FLAGS="-flto -march=native" \
+    cmake -DCMAKE_INSTALL_PREFIX="$H2O_PREFIX" -DCMAKE_C_FLAGS="-flto -march=native -mtune=native" \
           -DCMAKE_AR=/usr/bin/gcc-ar -DCMAKE_RANLIB=/usr/bin/gcc-ranlib -G Ninja .. && \
     cmake --build . -j && \
     cmake --install . && \

+ 4 - 3
frameworks/C/h2o/h2o.sh

@@ -25,16 +25,17 @@ if [[ -z "$MUSTACHE_C_PREFIX" ]]; then
 fi
 
 if [[ "$BENCHMARK_ENV" = "Azure" ]]; then
-	DB_CONN=5
+	DB_CONN=2
 else
-	DB_CONN=5
+	DB_CONN=1
 fi
 
 build_h2o_app()
 {
 	cmake -DCMAKE_INSTALL_PREFIX="$H2O_APP_PREFIX" -DCMAKE_BUILD_TYPE=Release \
 	      -DCMAKE_PREFIX_PATH="${H2O_PREFIX};${MUSTACHE_C_PREFIX}" \
-	      -DCMAKE_C_FLAGS="-march=native $1" -G Ninja "$H2O_APP_SRC_ROOT"
+	      -DCMAKE_C_FLAGS="-march=native -mtune=native $1" -G Ninja \
+	      "$H2O_APP_SRC_ROOT"
 	cmake --build . --clean-first -j
 }
 

+ 495 - 300
frameworks/C/h2o/src/database.c

@@ -30,21 +30,25 @@
 
 #include "database.h"
 #include "error.h"
+#include "global_data.h"
 #include "list.h"
-#include "thread.h"
 
-#define IS_RESETTING 1
-#define IS_WRITING 2
+#define EXPECT_SYNC 1
+#define IGNORE_RESULT 2
+#define IS_RESETTING 4
+#define MS_IN_S 1000
 
 typedef struct {
 	list_t l;
 	PGconn *conn;
-	thread_context_t *ctx;
-	db_query_param_t *param;
-	list_t *prepared_statement;
+	db_conn_pool_t *pool;
+	const list_t *prepared_statement;
+	queue_t queries;
 	h2o_socket_t *sock;
+	size_t query_num;
 	uint_fast32_t flags;
-	h2o_timeout_entry_t h2o_timeout_entry;
+	int sd;
+	h2o_timeout_entry_t timeout;
 } db_conn_t;
 
 typedef struct {
@@ -53,98 +57,117 @@ typedef struct {
 	const char *query;
 } prepared_statement_t;
 
-static int do_database_write(db_conn_t *db_conn);
-static int do_execute_query(db_conn_t *db_conn, bool direct_notification);
-static void error_notification(thread_context_t *ctx, bool timeout, const char *error_string);
-static void on_database_connect_error(db_conn_t *db_conn, bool timeout, const char *error_string);
+static h2o_socket_t *create_socket(int sd, h2o_loop_t *loop);
+static int do_execute_query(db_conn_t *conn, db_query_param_t *param);
+static void error_notification(db_conn_pool_t *pool, bool timeout, const char *error_string);
+static void on_database_connect_error(db_conn_t *conn, bool timeout, const char *error_string);
+static void on_database_connect_read_ready(h2o_socket_t *sock, const char *err);
 static void on_database_connect_timeout(h2o_timeout_entry_t *entry);
-static void on_database_error(db_conn_t *db_conn, const char *error_string);
-static void on_database_read_ready(h2o_socket_t *db_sock, const char *err);
-static void on_database_timeout(h2o_timeout_entry_t *entry);
-static void on_database_write_ready(h2o_socket_t *db_sock, const char *err);
-static void poll_database_connection(h2o_socket_t *db_sock, const char *err);
-static void process_query(db_conn_t *db_conn);
-static void start_database_connect(thread_context_t *ctx, db_conn_t *db_conn);
-
-static int do_database_write(db_conn_t *db_conn)
+static void on_database_connect_write_ready(h2o_socket_t *sock, const char *err);
+static void on_database_error(db_conn_t *conn, const char *error_string);
+static void on_database_read_ready(h2o_socket_t *sock, const char *err);
+static void on_database_timeout(h2o_timeout_entry_t *timeout);
+static void on_database_write_ready(h2o_socket_t *sock, const char *err);
+static void poll_database_connection(h2o_socket_t *sock, const char *err);
+static void prepare_statements(db_conn_t *conn);
+static void process_queries(db_conn_t *conn);
+static void start_database_connect(db_conn_pool_t *pool, db_conn_t *conn);
+
+static h2o_socket_t *create_socket(int sd, h2o_loop_t *loop)
 {
-	assert(db_conn->param);
+	sd = dup(sd);
 
-	int ret = db_conn->param->on_write_ready(db_conn->param, db_conn->conn);
+	if (sd < 0) {
+		STANDARD_ERROR("dup");
+		return NULL;
+	}
+
+	const int flags = fcntl(sd, F_GETFD);
 
-	if (!ret)
-		db_conn->flags &= ~IS_WRITING;
-	else if (ret < 0) {
-		ERROR(PQerrorMessage(db_conn->conn));
-		on_database_error(db_conn, DB_ERROR);
+	if (flags < 0 || fcntl(sd, F_SETFD, flags | FD_CLOEXEC)) {
+		STANDARD_ERROR("fcntl");
+		close(sd);
+		return NULL;
 	}
-	else {
-		h2o_socket_notify_write(db_conn->sock, on_database_write_ready);
-		ret = 0;
+
+	h2o_socket_t * const ret = h2o_evloop_socket_create(loop, sd, H2O_SOCKET_FLAG_DONT_READ);
+
+	if (!ret) {
+		errno = ENOMEM;
+		STANDARD_ERROR("h2o_evloop_socket_create");
+		close(sd);
 	}
 
 	return ret;
 }
 
-static int do_execute_query(db_conn_t *db_conn, bool direct_notification)
+static int do_execute_query(db_conn_t *conn, db_query_param_t *param)
 {
-	const int ec = db_conn->param->flags & IS_PREPARED ?
-	               PQsendQueryPrepared(db_conn->conn,
-	                                   db_conn->param->command,
-	                                   db_conn->param->nParams,
-	                                   db_conn->param->paramValues,
-	                                   db_conn->param->paramLengths,
-	                                   db_conn->param->paramFormats,
-	                                   db_conn->param->resultFormat) :
-	               PQsendQuery(db_conn->conn, db_conn->param->command);
-	int ret = 1;
-
-	if (ec) {
-		if (db_conn->param->flags & IS_SINGLE_ROW)
-			PQsetSingleRowMode(db_conn->conn);
-
-		db_conn->h2o_timeout_entry.cb = on_database_timeout;
-		h2o_timeout_link(db_conn->ctx->event_loop.h2o_ctx.loop,
-		                 &db_conn->ctx->db_state.h2o_timeout,
-		                 &db_conn->h2o_timeout_entry);
-		h2o_socket_read_start(db_conn->sock, on_database_read_ready);
-
-		const int send_status = PQflush(db_conn->conn);
+	assert(conn->query_num);
+	assert((conn->queries.head && conn->query_num < conn->pool->config->max_pipeline_query_num) ||
+	       (!conn->queries.head && conn->query_num == conn->pool->config->max_pipeline_query_num));
+
+	const int ec = param->flags & IS_PREPARED ?
+	               PQsendQueryPrepared(conn->conn,
+	                                   param->command,
+	                                   param->nParams,
+	                                   param->paramValues,
+	                                   param->paramLengths,
+	                                   param->paramFormats,
+	                                   param->resultFormat) :
+	               PQsendQueryParams(conn->conn,
+	                                 param->command,
+	                                 param->nParams,
+	                                 param->paramTypes,
+	                                 param->paramValues,
+	                                 param->paramLengths,
+	                                 param->paramFormats,
+	                                 param->resultFormat);
+
+	if (!ec) {
+		ERROR(PQerrorMessage(conn->conn));
+		return 1;
+	}
 
-		if (send_status < 0) {
-			if (direct_notification)
-				db_conn->param = NULL;
+	if (!PQpipelineSync(conn->conn)) {
+		LIBRARY_ERROR("PQpipelineSync", PQerrorMessage(conn->conn));
+		return 1;
+	}
 
-			LIBRARY_ERROR("PQflush", PQerrorMessage(db_conn->conn));
-			on_database_error(db_conn, DB_ERROR);
-		}
-		else {
-			ret = 0;
+	const int send_status = PQflush(conn->conn);
 
-			if (send_status)
-				h2o_socket_notify_write(db_conn->sock, on_database_write_ready);
-		}
+	if (send_status < 0) {
+		LIBRARY_ERROR("PQflush", PQerrorMessage(conn->conn));
+		return 1;
 	}
-	else {
-		if (direct_notification)
-			db_conn->param = NULL;
-
-		ERROR(PQerrorMessage(db_conn->conn));
-		on_database_error(db_conn, DB_ERROR);
+	else if (send_status)
+		h2o_socket_notify_write(conn->sock, on_database_write_ready);
+
+	if (!conn->queries.head && !(conn->flags & (EXPECT_SYNC | IGNORE_RESULT))) {
+		assert(!h2o_timeout_is_linked(&conn->timeout));
+		conn->timeout.cb = on_database_timeout;
+		h2o_timeout_link(conn->pool->loop, &conn->pool->timeout, &conn->timeout);
+		h2o_socket_read_start(conn->sock, on_database_read_ready);
 	}
 
-	return ret;
+	param->l.next = NULL;
+	*conn->queries.tail = &param->l;
+	conn->queries.tail = &param->l.next;
+	conn->query_num--;
+	return 0;
 }
 
-static void error_notification(thread_context_t *ctx, bool timeout, const char *error_string)
+static void error_notification(db_conn_pool_t *pool, bool timeout, const char *error_string)
 {
-	if (!--ctx->db_state.db_conn_num) {
+	assert(pool->conn_num < pool->config->max_db_conn_num);
+
+	if (++pool->conn_num == pool->config->max_db_conn_num) {
 		// We don't want to keep requests waiting for an unbounded amount of time.
-		list_t *iter = ctx->db_state.queries.head;
+		list_t *iter = pool->queries.head;
 
-		ctx->db_state.queries.head = NULL;
-		ctx->db_state.queries.tail = &ctx->db_state.queries.head;
-		ctx->db_state.query_num = 0;
+		pool->queries.head = NULL;
+		pool->queries.tail = &pool->queries.head;
+		pool->query_num = pool->config->max_query_num;
 
 		if (iter)
 			do {
@@ -161,297 +184,443 @@ static void error_notification(thread_context_t *ctx, bool timeout, const char *
 	}
 }
 
-static void on_database_connect_error(db_conn_t *db_conn, bool timeout, const char *error_string)
+static void on_database_connect_error(db_conn_t *conn, bool timeout, const char *error_string)
 {
-	thread_context_t * const ctx = db_conn->ctx;
-
-	error_notification(ctx, timeout, error_string);
-	h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-	h2o_socket_read_stop(db_conn->sock);
-	h2o_socket_close(db_conn->sock);
-	PQfinish(db_conn->conn);
-	free(db_conn);
+	error_notification(conn->pool, timeout, error_string);
+	h2o_timeout_unlink(&conn->timeout);
+	h2o_socket_read_stop(conn->sock);
+	h2o_socket_close(conn->sock);
+	PQfinish(conn->conn);
+	free(conn);
+}
+
+static void on_database_connect_read_ready(h2o_socket_t *sock, const char *err)
+{
+	db_conn_t * const conn = sock->data;
+
+	if (err) {
+		ERROR(err);
+		on_database_connect_error(conn, false, DB_ERROR);
+		return;
+	}
+
+	if (!PQconsumeInput(conn->conn)) {
+		LIBRARY_ERROR("PQconsumeInput", PQerrorMessage(conn->conn));
+		on_database_connect_error(conn, false, DB_ERROR);
+		return;
+	}
+
+	const int send_status = PQflush(conn->conn);
+
+	if (send_status < 0) {
+		LIBRARY_ERROR("PQflush", PQerrorMessage(conn->conn));
+		on_database_connect_error(conn, false, DB_ERROR);
+		return;
+	}
+	else if (send_status) {
+		h2o_socket_notify_write(conn->sock, on_database_write_ready);
+		return;
+	}
+
+	while (!PQisBusy(conn->conn)) {
+		PGresult * const result = PQgetResult(conn->conn);
+
+		if (result) {
+			switch (PQresultStatus(result)) {
+				case PGRES_COMMAND_OK:
+					break;
+				case PGRES_PIPELINE_SYNC:
+					PQclear(result);
+					h2o_timeout_unlink(&conn->timeout);
+					h2o_socket_read_stop(conn->sock);
+					process_queries(conn);
+					return;
+				default:
+					LIBRARY_ERROR("PQresultStatus", PQresultErrorMessage(result));
+					PQclear(result);
+					on_database_connect_error(conn, false, DB_ERROR);
+					return;
+			}
+
+			PQclear(result);
+		}
+	}
 }
 
 static void on_database_connect_timeout(h2o_timeout_entry_t *entry)
 {
-	db_conn_t * const db_conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, h2o_timeout_entry, entry);
+	db_conn_t * const conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, timeout, entry);
 
 	ERROR(DB_TIMEOUT_ERROR);
-	on_database_connect_error(db_conn, true, DB_TIMEOUT_ERROR);
+	on_database_connect_error(conn, true, DB_TIMEOUT_ERROR);
 }
 
-static void on_database_error(db_conn_t *db_conn, const char *error_string)
+static void on_database_connect_write_ready(h2o_socket_t *sock, const char *err)
 {
-	if (db_conn->prepared_statement)
-		on_database_connect_error(db_conn, false, error_string);
+	db_conn_t * const conn = sock->data;
+
+	if (err) {
+		ERROR(err);
+		on_database_connect_error(conn, false, err);
+	}
 	else {
-		if (db_conn->param) {
-			db_conn->param->on_error(db_conn->param, error_string);
-			db_conn->param = NULL;
-		}
+		const int send_status = PQflush(conn->conn);
 
-		if (PQstatus(db_conn->conn) == CONNECTION_OK) {
-			h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-			h2o_socket_read_stop(db_conn->sock);
-			process_query(db_conn);
+		if (send_status < 0) {
+			LIBRARY_ERROR("PQflush", PQerrorMessage(conn->conn));
+			on_database_connect_error(conn, false, DB_ERROR);
 		}
-		else
-			start_database_connect(db_conn->ctx, db_conn);
+		else if (send_status)
+			h2o_socket_notify_write(conn->sock, on_database_connect_write_ready);
 	}
 }
 
-static void on_database_read_ready(h2o_socket_t *db_sock, const char *err)
+static void on_database_error(db_conn_t *conn, const char *error_string)
 {
-	db_conn_t * const db_conn = db_sock->data;
+	if (conn->queries.head)
+		do {
+			db_query_param_t * const param = H2O_STRUCT_FROM_MEMBER(db_query_param_t,
+			                                                        l,
+			                                                        conn->queries.head);
 
-	if (err)
+			// The callback may free the db_query_param_t structure.
+			conn->queries.head = param->l.next;
+			param->on_error(param, error_string);
+		} while (conn->queries.head);
+
+	start_database_connect(conn->pool, conn);
+}
+
+static void on_database_read_ready(h2o_socket_t *sock, const char *err)
+{
+	db_conn_t * const conn = sock->data;
+
+	if (err) {
 		ERROR(err);
-	else {
-		if (PQconsumeInput(db_conn->conn)) {
-			const int send_status = PQflush(db_conn->conn);
-
-			if (send_status > 0)
-				h2o_socket_notify_write(db_conn->sock, on_database_write_ready);
-
-			if (send_status >= 0) {
-				while (!PQisBusy(db_conn->conn)) {
-					PGresult * const result = PQgetResult(db_conn->conn);
-
-					if (db_conn->param)
-						switch (db_conn->param->on_result(db_conn->param, result)) {
-							case WANT_WRITE:
-								db_conn->flags |= IS_WRITING;
-
-								if (do_database_write(db_conn))
-									return;
-
-								break;
-							case DONE:
-								db_conn->param = NULL;
-								h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-								break;
-							default:
-								break;
-						}
-					else if (result) {
-						if (PQresultStatus(result) != PGRES_COMMAND_OK)
-							LIBRARY_ERROR("PQresultStatus", PQresultErrorMessage(result));
-
-						PQclear(result);
-					}
-
-					if (!result) {
-						assert(!db_conn->param);
-						h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-						h2o_socket_read_stop(db_conn->sock);
-						process_query(db_conn);
-						break;
-					}
-				}
+		on_database_error(conn, err);
+		return;
+	}
+
+	if (!PQconsumeInput(conn->conn)) {
+		LIBRARY_ERROR("PQconsumeInput", PQerrorMessage(conn->conn));
+		on_database_error(conn, DB_ERROR);
+		return;
+	}
+
+	const int send_status = PQflush(conn->conn);
 
+	if (send_status < 0) {
+		LIBRARY_ERROR("PQflush", PQerrorMessage(conn->conn));
+		on_database_error(conn, DB_ERROR);
+		return;
+	}
+	else if (send_status) {
+		h2o_socket_notify_write(conn->sock, on_database_write_ready);
+		return;
+	}
+
+	while (!PQisBusy(conn->conn)) {
+		PGresult * const result = PQgetResult(conn->conn);
+
+		if (conn->flags & IGNORE_RESULT) {
+			if (result)
+				PQclear(result);
+			else
+				conn->flags &= ~IGNORE_RESULT;
+		}
+		else if (conn->flags & EXPECT_SYNC) {
+			if (PQresultStatus(result) == PGRES_PIPELINE_SYNC) {
+				PQclear(result);
+				conn->flags &= ~EXPECT_SYNC;
+			}
+			else {
+				LIBRARY_ERROR("PQresultStatus", PQresultErrorMessage(result));
+				PQclear(result);
+				on_database_error(conn, DB_ERROR);
 				return;
 			}
 		}
-
-		ERROR(PQerrorMessage(db_conn->conn));
+		else if (conn->queries.head) {
+			db_query_param_t * const param = H2O_STRUCT_FROM_MEMBER(db_query_param_t,
+			                                                        l,
+			                                                        conn->queries.head);
+			// The callback may free the db_query_param_t structure.
+			list_t * const next = param->l.next;
+			const bool nonnull_result = !!result;
+
+			if (param->on_result(param, result) == DONE) {
+				conn->query_num++;
+				h2o_timeout_unlink(&conn->timeout);
+				conn->timeout.cb = on_database_timeout;
+				h2o_timeout_link(conn->pool->loop, &conn->pool->timeout, &conn->timeout);
+				conn->flags |= EXPECT_SYNC;
+				conn->queries.head = next;
+
+				if (!next)
+					conn->queries.tail = &conn->queries.head;
+
+				if (nonnull_result)
+					conn->flags |= IGNORE_RESULT;
+			}
+			else
+				assert(nonnull_result);
+		}
+		else {
+			assert(!result);
+			h2o_timeout_unlink(&conn->timeout);
+			h2o_socket_read_stop(conn->sock);
+			break;
+		}
 	}
 
-	on_database_error(db_conn, DB_ERROR);
+	process_queries(conn);
 }
 
-static void on_database_timeout(h2o_timeout_entry_t *entry)
+static void on_database_timeout(h2o_timeout_entry_t *timeout)
 {
-	db_conn_t * const db_conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, h2o_timeout_entry, entry);
+	db_conn_t * const conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, timeout, timeout);
 
 	ERROR(DB_TIMEOUT_ERROR);
 
-	if (db_conn->param) {
-		db_conn->param->on_timeout(db_conn->param);
-		db_conn->param = NULL;
+	if (conn->queries.head) {
+		db_query_param_t * const param = H2O_STRUCT_FROM_MEMBER(db_query_param_t,
+		                                                        l,
+		                                                        conn->queries.head);
+
+		conn->queries.head = param->l.next;
+		param->on_timeout(param);
 	}
 
-	start_database_connect(db_conn->ctx, db_conn);
+	on_database_error(conn, DB_TIMEOUT_ERROR);
 }
 
-static void on_database_write_ready(h2o_socket_t *db_sock, const char *err)
+static void on_database_write_ready(h2o_socket_t *sock, const char *err)
 {
-	db_conn_t * const db_conn = db_sock->data;
+	db_conn_t * const conn = sock->data;
 
 	if (err) {
 		ERROR(err);
-		on_database_error(db_conn, DB_ERROR);
+		on_database_error(conn, err);
 	}
 	else {
-		const int send_status = PQflush(db_conn->conn);
+		const int send_status = PQflush(conn->conn);
 
-		if (!send_status) {
-			if (db_conn->flags & IS_WRITING && db_conn->param)
-				do_database_write(db_conn);
+		if (send_status < 0) {
+			LIBRARY_ERROR("PQflush", PQerrorMessage(conn->conn));
+			on_database_error(conn, DB_ERROR);
 		}
-		else if (send_status < 0) {
-			LIBRARY_ERROR("PQflush", PQerrorMessage(db_conn->conn));
-			on_database_error(db_conn, DB_ERROR);
+		else {
+			if (send_status)
+				h2o_socket_notify_write(conn->sock, on_database_write_ready);
+
+			process_queries(conn);
 		}
-		else
-			h2o_socket_notify_write(db_conn->sock, on_database_write_ready);
 	}
 }
 
-static void poll_database_connection(h2o_socket_t *db_sock, const char *err)
+static void poll_database_connection(h2o_socket_t *sock, const char *err)
 {
-	db_conn_t * const db_conn = db_sock->data;
+	db_conn_t * const conn = sock->data;
 
 	if (err)
 		ERROR(err);
 	else {
-		const PostgresPollingStatusType status = db_conn->flags & IS_RESETTING ?
-		                                         PQresetPoll(db_conn->conn) :
-		                                         PQconnectPoll(db_conn->conn);
+		const PostgresPollingStatusType status = conn->flags & IS_RESETTING ?
+		                                         PQresetPoll(conn->conn) :
+		                                         PQconnectPoll(conn->conn);
+		const int sd = PQsocket(conn->conn);
 
 		switch (status) {
 			case PGRES_POLLING_WRITING:
-				if (!h2o_socket_is_writing(db_conn->sock))
-					h2o_socket_notify_write(db_conn->sock, poll_database_connection);
+				h2o_socket_read_stop(conn->sock);
+
+				if (sd != conn->sd) {
+					h2o_socket_t * const sock = create_socket(sd, conn->pool->loop);
+
+					if (!sock)
+						break;
+
+					h2o_socket_close(conn->sock);
+					conn->sd = sd;
+					conn->sock = sock;
+				}
+
+				if (!h2o_socket_is_writing(conn->sock))
+					h2o_socket_notify_write(conn->sock, poll_database_connection);
 
-				h2o_socket_read_stop(db_conn->sock);
 				return;
 			case PGRES_POLLING_OK:
-				if (PQsetnonblocking(db_conn->conn, 1)) {
-					LIBRARY_ERROR("PQsetnonblocking", PQerrorMessage(db_conn->conn));
+				h2o_timeout_unlink(&conn->timeout);
+				h2o_socket_read_stop(conn->sock);
+
+				if (PQsetnonblocking(conn->conn, 1)) {
+					LIBRARY_ERROR("PQsetnonblocking", PQerrorMessage(conn->conn));
+					break;
+				}
+
+				if (!PQenterPipelineMode(conn->conn)) {
+					ERROR("PQenterPipelineMode");
 					break;
 				}
 
-				h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-				h2o_socket_read_stop(db_conn->sock);
-				process_query(db_conn);
+				if (sd != conn->sd) {
+					h2o_socket_t * const sock = create_socket(sd, conn->pool->loop);
+
+					if (!sock)
+						break;
+
+					h2o_socket_close(conn->sock);
+					conn->sd = sd;
+					conn->sock = sock;
+				}
+
+				prepare_statements(conn);
 				return;
 			case PGRES_POLLING_READING:
-				h2o_socket_read_start(db_conn->sock, poll_database_connection);
+				if (sd != conn->sd) {
+					h2o_socket_t * const sock = create_socket(sd, conn->pool->loop);
+
+					if (!sock)
+						break;
+
+					h2o_socket_read_stop(conn->sock);
+					h2o_socket_close(conn->sock);
+					conn->sd = sd;
+					conn->sock = sock;
+				}
+
+				h2o_socket_read_start(conn->sock, poll_database_connection);
 				return;
 			default:
-				ERROR(PQerrorMessage(db_conn->conn));
+				ERROR(PQerrorMessage(conn->conn));
 		}
 	}
 
-	on_database_connect_error(db_conn, false, DB_ERROR);
+	on_database_connect_error(conn, false, DB_ERROR);
 }
 
-static void process_query(db_conn_t *db_conn)
+static void prepare_statements(db_conn_t *conn)
 {
-	if (db_conn->prepared_statement) {
-		const prepared_statement_t * const p = H2O_STRUCT_FROM_MEMBER(prepared_statement_t,
-		                                                              l,
-		                                                              db_conn->prepared_statement);
-
-		if (PQsendPrepare(db_conn->conn, p->name, p->query, 0, NULL)) {
-			db_conn->prepared_statement = p->l.next;
-			db_conn->h2o_timeout_entry.cb = on_database_connect_timeout;
-			h2o_timeout_link(db_conn->ctx->event_loop.h2o_ctx.loop,
-			                 &db_conn->ctx->db_state.h2o_timeout,
-			                 &db_conn->h2o_timeout_entry);
-			h2o_socket_read_start(db_conn->sock, on_database_read_ready);
-			on_database_write_ready(db_conn->sock, NULL);
-		}
-		else {
-			LIBRARY_ERROR("PQsendPrepare", PQerrorMessage(db_conn->conn));
-			on_database_connect_error(db_conn, false, DB_ERROR);
+	if (conn->prepared_statement) {
+		const list_t *iter = conn->prepared_statement;
+
+		do {
+			const prepared_statement_t * const p = H2O_STRUCT_FROM_MEMBER(prepared_statement_t,
+			                                                              l,
+			                                                              iter);
+
+			if (!PQsendPrepare(conn->conn, p->name, p->query, 0, NULL)) {
+				LIBRARY_ERROR("PQsendPrepare", PQerrorMessage(conn->conn));
+				on_database_connect_error(conn, false, DB_ERROR);
+				return;
+			}
+
+			iter = iter->next;
+		} while (iter);
+
+		if (!PQpipelineSync(conn->conn)) {
+			LIBRARY_ERROR("PQpipelineSync", PQerrorMessage(conn->conn));
+			on_database_connect_error(conn, false, DB_ERROR);
+			return;
 		}
+
+		conn->prepared_statement = NULL;
+		conn->timeout.cb = on_database_connect_timeout;
+		h2o_timeout_link(conn->pool->loop, &conn->pool->timeout, &conn->timeout);
+		h2o_socket_read_start(conn->sock, on_database_connect_read_ready);
+		on_database_connect_write_ready(conn->sock, NULL);
 	}
-	else if (db_conn->ctx->db_state.query_num) {
-		db_conn->ctx->db_state.query_num--;
+	else
+		process_queries(conn);
+}
 
-		if (db_conn->ctx->db_state.queries.tail == &db_conn->ctx->db_state.queries.head->next) {
-			assert(!db_conn->ctx->db_state.query_num);
-			db_conn->ctx->db_state.queries.tail = &db_conn->ctx->db_state.queries.head;
+static void process_queries(db_conn_t *conn)
+{
+	while (conn->query_num && conn->pool->queries.head) {
+		db_query_param_t * const param = H2O_STRUCT_FROM_MEMBER(db_query_param_t,
+		                                                        l,
+		                                                        conn->pool->queries.head);
+
+		if (++conn->pool->query_num == conn->pool->config->max_query_num) {
+			assert(conn->pool->queries.tail == &param->l.next);
+			conn->pool->queries.tail = &conn->pool->queries.head;
 		}
 
-		db_conn->param = H2O_STRUCT_FROM_MEMBER(db_query_param_t,
-		                                        l,
-		                                        db_conn->ctx->db_state.queries.head);
-		db_conn->ctx->db_state.queries.head = db_conn->ctx->db_state.queries.head->next;
-		do_execute_query(db_conn, false);
+		conn->pool->queries.head = param->l.next;
+
+		if (do_execute_query(conn, param)) {
+			param->on_error(param, DB_ERROR);
+
+			if (PQstatus(conn->conn) != CONNECTION_OK) {
+				on_database_error(conn, DB_ERROR);
+				return;
+			}
+		}
 	}
-	else {
-		db_conn->l.next = db_conn->ctx->db_state.db_conn;
-		db_conn->ctx->db_state.db_conn = &db_conn->l;
-		db_conn->ctx->db_state.free_db_conn_num++;
+
+	if (!conn->queries.head && !(conn->flags & (EXPECT_SYNC | IGNORE_RESULT))) {
+		conn->l.next = conn->pool->conn;
+		conn->pool->conn = &conn->l;
 	}
 }
 
-static void start_database_connect(thread_context_t *ctx, db_conn_t *db_conn)
+static void start_database_connect(db_conn_pool_t *pool, db_conn_t *conn)
 {
-	if (db_conn) {
-		db_conn->flags = IS_RESETTING;
-		h2o_timeout_unlink(&db_conn->h2o_timeout_entry);
-		h2o_socket_read_stop(db_conn->sock);
-		h2o_socket_close(db_conn->sock);
-
-		if (!PQresetStart(db_conn->conn)) {
-			LIBRARY_ERROR("PQresetStart", PQerrorMessage(db_conn->conn));
+	if (conn) {
+		PGconn * const c = conn->conn;
+
+		h2o_timeout_unlink(&conn->timeout);
+		h2o_socket_read_stop(conn->sock);
+		h2o_socket_close(conn->sock);
+
+		if (!PQresetStart(c)) {
+			LIBRARY_ERROR("PQresetStart", PQerrorMessage(c));
 			goto error_dup;
 		}
+
+		memset(conn, 0, sizeof(*conn));
+		conn->conn = c;
+		conn->flags = IS_RESETTING;
 	}
 	else {
-		ctx->db_state.db_conn_num++;
-		db_conn = h2o_mem_alloc(sizeof(*db_conn));
-		memset(db_conn, 0, sizeof(*db_conn));
-
-		const char * const conninfo = ctx->config->db_host ? ctx->config->db_host : "";
-
-		db_conn->conn = PQconnectStart(conninfo);
+		assert(pool->conn_num);
+		pool->conn_num--;
+		conn = h2o_mem_alloc(sizeof(*conn));
+		memset(conn, 0, sizeof(*conn));
+		conn->conn = PQconnectStart(pool->conninfo);
 
-		if (!db_conn->conn) {
+		if (!conn->conn) {
 			errno = ENOMEM;
 			STANDARD_ERROR("PQconnectStart");
 			goto error_connect;
 		}
 
-		if (PQstatus(db_conn->conn) == CONNECTION_BAD) {
-			LIBRARY_ERROR("PQstatus", PQerrorMessage(db_conn->conn));
+		if (PQstatus(conn->conn) == CONNECTION_BAD) {
+			LIBRARY_ERROR("PQstatus", PQerrorMessage(conn->conn));
 			goto error_dup;
 		}
 	}
 
-	const int sd = dup(PQsocket(db_conn->conn));
-
-	if (sd < 0) {
-		STANDARD_ERROR("dup");
-		goto error_dup;
-	}
-
-	const int flags = fcntl(sd, F_GETFD);
-
-	if (flags < 0 || fcntl(sd, F_SETFD, flags | FD_CLOEXEC)) {
-		STANDARD_ERROR("fcntl");
-		goto error_fcntl;
-	}
-
-	db_conn->sock = h2o_evloop_socket_create(ctx->event_loop.h2o_ctx.loop,
-	                                         sd,
-	                                         H2O_SOCKET_FLAG_DONT_READ);
-
-	if (db_conn->sock) {
-		db_conn->sock->data = db_conn;
-		db_conn->ctx = ctx;
-		db_conn->h2o_timeout_entry.cb = on_database_connect_timeout;
-		db_conn->prepared_statement = ctx->global_data->prepared_statements;
-		h2o_timeout_link(ctx->event_loop.h2o_ctx.loop,
-		                 &ctx->db_state.h2o_timeout,
-		                 &db_conn->h2o_timeout_entry);
-		h2o_socket_notify_write(db_conn->sock, poll_database_connection);
+	conn->sd = PQsocket(conn->conn);
+	conn->sock = create_socket(conn->sd, pool->loop);
+
+	if (conn->sock) {
+		conn->sock->data = conn;
+		conn->pool = pool;
+		conn->prepared_statement = pool->prepared_statements;
+		conn->queries.tail = &conn->queries.head;
+		conn->query_num = pool->config->max_pipeline_query_num;
+		conn->timeout.cb = on_database_connect_timeout;
+		h2o_timeout_link(pool->loop, &pool->timeout, &conn->timeout);
+		h2o_socket_notify_write(conn->sock, poll_database_connection);
 		return;
 	}
 
-	errno = ENOMEM;
-	STANDARD_ERROR("h2o_evloop_socket_create");
-error_fcntl:
-	close(sd);
 error_dup:
-	PQfinish(db_conn->conn);
+	PQfinish(conn->conn);
 error_connect:
-	free(db_conn);
-	error_notification(ctx, false, DB_ERROR);
+	free(conn);
+	error_notification(pool, false, DB_ERROR);
 }
 
 void add_prepared_statement(const char *name, const char *query, list_t **prepared_statements)
@@ -465,27 +634,36 @@ void add_prepared_statement(const char *name, const char *query, list_t **prepar
 	*prepared_statements = &p->l;
 }
 
-int execute_query(thread_context_t *ctx, db_query_param_t *param)
+int execute_database_query(db_conn_pool_t *pool, db_query_param_t *param)
 {
 	int ret = 1;
 
-	if (ctx->db_state.free_db_conn_num) {
-		db_conn_t * const db_conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, l, ctx->db_state.db_conn);
+	if (pool->conn) {
+		db_conn_t * const conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, l, pool->conn);
+
+		assert(!conn->queries.head);
+		assert(!(conn->flags & (EXPECT_SYNC | IGNORE_RESULT)));
+		pool->conn = conn->l.next;
+		ret = do_execute_query(conn, param);
 
-		ctx->db_state.db_conn = db_conn->l.next;
-		ctx->db_state.free_db_conn_num--;
-		db_conn->param = param;
-		ret = do_execute_query(db_conn, true);
+		if (ret) {
+			if (PQstatus(conn->conn) == CONNECTION_OK) {
+				conn->l.next = pool->conn;
+				pool->conn = &conn->l;
+			}
+			else
+				start_database_connect(conn->pool, conn);
+		}
 	}
-	else if (ctx->db_state.query_num < ctx->config->max_query_num) {
-		if (ctx->db_state.db_conn_num < ctx->config->max_db_conn_num)
-			start_database_connect(ctx, NULL);
+	else if (pool->query_num) {
+		if (pool->conn_num)
+			start_database_connect(pool, NULL);
 
-		if (ctx->db_state.db_conn_num) {
+		if (pool->conn_num < pool->config->max_db_conn_num && pool->query_num) {
 			param->l.next = NULL;
-			*ctx->db_state.queries.tail = &param->l;
-			ctx->db_state.queries.tail = &param->l.next;
-			ctx->db_state.query_num++;
+			*pool->queries.tail = &param->l;
+			pool->queries.tail = &param->l.next;
+			pool->query_num--;
 			ret = 0;
 		}
 	}
@@ -493,31 +671,48 @@ int execute_query(thread_context_t *ctx, db_query_param_t *param)
 	return ret;
 }
 
-void free_database_state(h2o_loop_t *loop, db_state_t *db_state)
+void free_database_connection_pool(db_conn_pool_t *pool)
 {
-	assert(!db_state->query_num && db_state->free_db_conn_num == db_state->db_conn_num);
+	assert(!pool->queries.head);
+	assert(pool->query_num == pool->config->max_query_num);
 
-	list_t *iter = db_state->db_conn;
+	size_t num = 0;
 
-	if (iter)
+	if (pool->conn)
 		do {
-			db_conn_t * const db_conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, l, iter);
-
-			iter = iter->next;
-			assert(!db_conn->param && !h2o_timeout_is_linked(&db_conn->h2o_timeout_entry));
-			h2o_socket_close(db_conn->sock);
-			PQfinish(db_conn->conn);
-			free(db_conn);
-		} while (iter);
-
-	h2o_timeout_dispose(loop, &db_state->h2o_timeout);
+			db_conn_t * const conn = H2O_STRUCT_FROM_MEMBER(db_conn_t, l, pool->conn);
+
+			assert(!conn->queries.head);
+			assert(conn->query_num == pool->config->max_pipeline_query_num);
+			assert(!(conn->flags & (EXPECT_SYNC | IGNORE_RESULT)));
+			assert(!h2o_timeout_is_linked(&conn->timeout));
+			h2o_socket_read_stop(conn->sock);
+			h2o_socket_close(conn->sock);
+			PQfinish(conn->conn);
+			pool->conn = pool->conn->next;
+			free(conn);
+			num++;
+		} while (pool->conn);
+
+	assert(num + pool->conn_num == pool->config->max_db_conn_num);
+	h2o_timeout_dispose(pool->loop, &pool->timeout);
 }
 
-void initialize_database_state(h2o_loop_t *loop, db_state_t *db_state)
+void initialize_database_connection_pool(const char *conninfo,
+                                         const struct config_t *config,
+                                         const list_t *prepared_statements,
+                                         h2o_loop_t *loop,
+                                         db_conn_pool_t *pool)
 {
-	memset(db_state, 0, sizeof(*db_state));
-	db_state->queries.tail = &db_state->queries.head;
-	h2o_timeout_init(loop, &db_state->h2o_timeout, H2O_DEFAULT_HTTP1_REQ_TIMEOUT);
+	memset(pool, 0, sizeof(*pool));
+	pool->config = config;
+	pool->conninfo = conninfo ? conninfo : "";
+	pool->loop = loop;
+	pool->prepared_statements = prepared_statements;
+	pool->queries.tail = &pool->queries.head;
+	pool->conn_num = config->max_db_conn_num;
+	pool->query_num = config->max_query_num;
+	h2o_timeout_init(loop, &pool->timeout, config->db_timeout * MS_IN_S);
 }
 
 void remove_prepared_statements(list_t *prepared_statements)
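
A note on the queue handling in the file above: both `db_conn_pool_t` and each `db_conn_t` keep their pending `db_query_param_t`s on an intrusive FIFO (`queue_t` holds `head` plus a `tail` pointer-to-pointer), appended with the `*tail = &param->l; tail = &param->l.next;` idiom. The real `list_t`/`queue_t` definitions live in `list.h`, which is not part of this diff, so the sketch below assumes equivalent layouts:

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed layouts; the actual definitions in list.h may differ. */
typedef struct list_t {
	struct list_t *next;
} list_t;

typedef struct {
	list_t *head;
	list_t **tail; /* points at the last node's next field, or at head when empty */
} queue_t;

static void queue_init(queue_t *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

static void enqueue(queue_t *q, list_t *node)
{
	/* O(1) append, the same idiom as in do_execute_query() and
	 * execute_database_query(). */
	node->next = NULL;
	*q->tail = node;
	q->tail = &node->next;
}

static list_t *dequeue(queue_t *q)
{
	list_t * const node = q->head;

	if (node) {
		q->head = node->next;

		if (!q->head) /* queue drained; point the tail back at head */
			q->tail = &q->head;
	}

	return node;
}

int main(void)
{
	queue_t q;
	list_t a, b;

	queue_init(&q);
	enqueue(&q, &a);
	enqueue(&q, &b);
	assert(dequeue(&q) == &a); /* FIFO: the oldest query runs first */
	assert(dequeue(&q) == &b);
	assert(!dequeue(&q));
	puts("ok");
	return 0;
}
```

`database.c` folds the tail reset into the point where the last queued element is removed (see `process_queries()` and `error_notification()`); the effect is the same as the `dequeue()` above.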

+ 23 - 20
frameworks/C/h2o/src/database.h

@@ -25,57 +25,60 @@
 #include <stdint.h>
 #include <postgresql/libpq-fe.h>
 
-#include "global_data.h"
 #include "list.h"
 
 #define DB_ERROR "database error\n"
 #define DB_REQ_ERROR "too many concurrent database requests\n"
 #define DB_TIMEOUT_ERROR "database timeout\n"
 #define IS_PREPARED 1
-#define IS_SINGLE_ROW 2
 
 typedef enum {
 	SUCCESS,
 	DONE,
-	WANT_WRITE
 } result_return_t;
 
-typedef struct thread_context_t thread_context_t;
+struct config_t;
+struct db_query_param_t;
 
-typedef struct db_query_param_t db_query_param_t;
+typedef result_return_t (*on_result_t)(struct db_query_param_t *, PGresult *);
 
-typedef result_return_t (*on_result_t)(db_query_param_t *, PGresult *);
-
-struct db_query_param_t {
+typedef struct db_query_param_t {
 	list_t l;
-	void (*on_error)(db_query_param_t *, const char *);
+	void (*on_error)(struct db_query_param_t *, const char *);
 	on_result_t on_result;
-	void (*on_timeout)(db_query_param_t *);
-	int (*on_write_ready)(db_query_param_t *, PGconn *);
+	void (*on_timeout)(struct db_query_param_t *);
 	const char *command;
 	const char * const *paramValues;
 	const int *paramLengths;
 	const int *paramFormats;
+	const Oid *paramTypes;
 	size_t nParams;
 	uint_fast32_t flags;
 	int resultFormat;
-};
+} db_query_param_t;
 
 typedef struct {
-	list_t *db_conn;
+	const struct config_t *config;
+	list_t *conn;
+	const char *conninfo;
+	h2o_loop_t *loop;
+	const list_t *prepared_statements;
 	// We use a FIFO queue instead of a simpler stack, otherwise the earlier queries may wait
 	// an unbounded amount of time to be executed.
 	queue_t queries;
-	size_t db_conn_num;
-	size_t free_db_conn_num;
+	size_t conn_num;
 	size_t query_num;
-	h2o_timeout_t h2o_timeout;
-} db_state_t;
+	h2o_timeout_t timeout;
+} db_conn_pool_t;
 
 void add_prepared_statement(const char *name, const char *query, list_t **prepared_statements);
-int execute_query(thread_context_t *ctx, db_query_param_t *param);
-void free_database_state(h2o_loop_t *loop, db_state_t *db_state);
-void initialize_database_state(h2o_loop_t *loop, db_state_t *db_state);
+int execute_database_query(db_conn_pool_t *pool, db_query_param_t *param);
+void free_database_connection_pool(db_conn_pool_t *pool);
+void initialize_database_connection_pool(const char *conninfo,
+                                         const struct config_t *config,
+                                         const list_t *prepared_statements,
+                                         h2o_loop_t *loop,
+                                         db_conn_pool_t *pool);
 void remove_prepared_statements(list_t *prepared_statements);
 
 #endif // DATABASE_H_
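
Seen from a request handler, the reworked API amounts to filling in a `db_query_param_t` (callbacks, command, parameters) and handing it to `execute_database_query()`; the pool then takes care of queueing, pipelining, reconnection and timeouts. A hedged sketch of the handler side, inferred from the declarations above — the context struct, query text and error reporting are placeholders, not code from this tree:

```c
#include <stdlib.h>
#include <h2o.h>
#include "database.h"

typedef struct {
	db_query_param_t param; /* first member, so the casts below are valid */
	/* ... per-request state (h2o_req_t *, generator, ...) would go here ... */
} example_ctx_t;

static void on_example_error(db_query_param_t *param, const char *error_string)
{
	(void) error_string; /* a real handler sends an error response first */
	free((example_ctx_t *) param);
}

static void on_example_timeout(db_query_param_t *param)
{
	free((example_ctx_t *) param);
}

static result_return_t on_example_result(db_query_param_t *param, PGresult *result)
{
	/* Called for every PGresult of this query; result is NULL once the query
	 * has no more results. Returning DONE releases the pipeline slot, and the
	 * callback is allowed to free the parameter block. */
	if (result) {
		/* ... a real handler checks PQresultStatus() and reads the tuples ... */
		PQclear(result);
	}

	free((example_ctx_t *) param);
	return DONE;
}

int submit_example_query(db_conn_pool_t *pool)
{
	example_ctx_t * const ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return 1;

	ctx->param.command = "SELECT 1"; /* placeholder; handlers normally use IS_PREPARED statements */
	ctx->param.on_error = on_example_error;
	ctx->param.on_result = on_example_result;
	ctx->param.on_timeout = on_example_timeout;
	ctx->param.resultFormat = 0; /* text-format results */

	if (execute_database_query(pool, &ctx->param)) {
		free(ctx); /* pool and queue full: reply with DB_REQ_ERROR upstream */
		return 1;
	}

	return 0;
}
```

The real handlers below (`single_query()`, `fortunes()`) follow the same shape, with actual response generation in place of the placeholders.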

+ 5 - 4
frameworks/C/h2o/src/event_loop.c

@@ -62,7 +62,7 @@ static void accept_connection(h2o_socket_t *listener, const char *err)
 		                                                      listener->data);
 
 		if (!ctx->shutdown) {
-			size_t accepted = ctx->config->max_accept;
+			size_t accepted = ctx->global_thread_data->config->max_accept;
 
 			assert(accepted);
 
@@ -232,12 +232,13 @@ static void shutdown_server(h2o_socket_t *listener, const char *err)
 			ctx->event_loop.h2o_socket = NULL;
 		}
 
-		for (size_t i = ctx->config->thread_num - 1; i > 0; i--) {
+		for (size_t i = ctx->global_thread_data->config->thread_num - 1; i > 0; i--) {
 			message_t * const msg = h2o_mem_alloc(sizeof(*msg));
 
 			memset(msg, 0, sizeof(*msg));
 			msg->type = SHUTDOWN;
-			h2o_multithread_send_message(&ctx->global_thread_data[i].h2o_receiver, &msg->super);
+			h2o_multithread_send_message(&ctx->global_thread_data[i].h2o_receiver,
+			                             &msg->super);
 		}
 	}
 }
@@ -265,7 +266,7 @@ static void start_accept_polling(const config_t *config,
 	h2o_socket_read_start(h2o_socket, accept_cb);
 }
 
-void event_loop(thread_context_t *ctx)
+void event_loop(struct thread_context_t *ctx)
 {
 	while (!ctx->shutdown || ctx->event_loop.conn_num)
 		h2o_evloop_run(ctx->event_loop.h2o_ctx.loop, INT32_MAX);

+ 2 - 2
frameworks/C/h2o/src/event_loop.h

@@ -31,7 +31,7 @@ typedef enum {
 	SHUTDOWN
 } message_type_t;
 
-typedef struct thread_context_t thread_context_t;
+struct thread_context_t;
 
 typedef struct {
 	h2o_socket_t *h2o_https_socket;
@@ -46,7 +46,7 @@ typedef struct {
 	h2o_multithread_message_t super;
 } message_t;
 
-void event_loop(thread_context_t *ctx);
+void event_loop(struct thread_context_t *ctx);
 void free_event_loop(event_loop_t *event_loop, h2o_multithread_receiver_t *h2o_receiver);
 void initialize_event_loop(bool is_main_thread,
                            global_data_t *global_data,

+ 3 - 2
frameworks/C/h2o/src/global_data.h

@@ -32,7 +32,7 @@
 struct global_thread_data_t;
 struct thread_context_t;
 
-typedef struct {
+typedef struct config_t {
 	const char *bind_address;
 	const char *cert;
 	const char *db_host;
@@ -40,9 +40,11 @@ typedef struct {
 	const char *log;
 	const char *root;
 	const char *template_path;
+	size_t db_timeout;
 	size_t max_accept;
 	size_t max_db_conn_num;
 	size_t max_json_generator;
+	size_t max_pipeline_query_num;
 	size_t max_query_num;
 	size_t thread_num;
 	uint16_t https_port;
@@ -53,7 +55,6 @@ typedef struct {
 	h2o_logger_t *file_logger;
 	struct global_thread_data_t *global_thread_data;
 	list_t *postinitialization_tasks;
-	list_t *prepared_statements;
 	h2o_socket_t *signals;
 	SSL_CTX *ssl_ctx;
 	size_t memory_alignment;

+ 18 - 13
frameworks/C/h2o/src/handlers/fortune.c

@@ -214,7 +214,7 @@ static int fortunes(struct st_h2o_handler_t *self, h2o_req_t *req)
 	fortune_ctx->req = req;
 	fortune_ctx->result = &fortune->l;
 
-	if (execute_query(ctx, &fortune_ctx->param)) {
+	if (execute_database_query(&ctx->request_handler_data.hello_world_db, &fortune_ctx->param)) {
 		fortune_ctx->cleanup = true;
 		send_service_unavailable_error(DB_REQ_ERROR, req);
 	}
@@ -284,9 +284,10 @@ static result_return_t on_fortune_result(db_query_param_t *param, PGresult *resu
 		fortune_ctx->iovec_list_iter = iovec_list;
 		fortune_ctx->result = sort_list(fortune_ctx->result, compare_fortunes);
 
-		if (mustache_render(&api,
-		                    fortune_ctx,
-		                    ctx->global_data->request_handler_data.fortunes_template)) {
+		struct mustache_token_t * const fortunes_template =
+			ctx->global_thread_data->global_data->request_handler_data.fortunes_template;
+
+		if (mustache_render(&api, fortune_ctx, fortunes_template)) {
 			fortune_ctx->iovec_list = iovec_list->l.next;
 			set_default_response_param(HTML, fortune_ctx->content_length, fortune_ctx->req);
 			h2o_start_response(fortune_ctx->req, &fortune_ctx->generator);
@@ -422,25 +423,29 @@ static void template_error(mustache_api_t *api,
 	print_error(template_input->name, lineno, "mustache_compile", error);
 }
 
-void cleanup_fortunes_handler(global_data_t *global_data)
+void cleanup_fortunes_handler(request_handler_data_t *data)
 {
-	if (global_data->request_handler_data.fortunes_template) {
+	if (data->fortunes_template) {
 		mustache_api_t api = {.freedata = NULL};
 
-		mustache_free(&api, global_data->request_handler_data.fortunes_template);
+		mustache_free(&api, data->fortunes_template);
 	}
 }
 
 void initialize_fortunes_handler(const config_t *config,
-                                 global_data_t *global_data,
                                  h2o_hostconf_t *hostconf,
-                                 h2o_access_log_filehandle_t *log_handle)
+                                 h2o_access_log_filehandle_t *log_handle,
+                                 request_handler_data_t *data)
 {
 	mustache_template_t *template = NULL;
-	const size_t template_path_prefix_len = config->template_path ? strlen(config->template_path) : 0;
+	const size_t template_path_prefix_len = config->template_path ?
+	                                        strlen(config->template_path) :
+	                                        0;
 	char path[template_path_prefix_len + sizeof(TEMPLATE_PATH_SUFFIX)];
 
-	memcpy(path, config->template_path, template_path_prefix_len);
+	if (template_path_prefix_len)
+		memcpy(path, config->template_path, template_path_prefix_len);
+
 	memcpy(path + template_path_prefix_len, TEMPLATE_PATH_SUFFIX, sizeof(TEMPLATE_PATH_SUFFIX));
 
 	template_input_t template_input = {.input = fopen(path, "rb"), .name = path};
@@ -465,10 +470,10 @@ void initialize_fortunes_handler(const config_t *config,
 		STANDARD_ERROR("fopen");
 
 	if (template) {
-		global_data->request_handler_data.fortunes_template = template;
+		data->fortunes_template = template;
 		add_prepared_statement(FORTUNE_TABLE_NAME,
 		                       FORTUNE_QUERY,
-		                       &global_data->prepared_statements);
+		                       &data->prepared_statements);
 		register_request_handler("/fortunes", fortunes, hostconf, log_handle);
 	}
 }

+ 4 - 3
frameworks/C/h2o/src/handlers/fortune.h

@@ -24,11 +24,12 @@
 #include <h2o.h>
 
 #include "global_data.h"
+#include "request_handler_data.h"
 
-void cleanup_fortunes_handler(global_data_t *global_data);
+void cleanup_fortunes_handler(request_handler_data_t *data);
 void initialize_fortunes_handler(const config_t *config,
-                                 global_data_t *global_data,
                                  h2o_hostconf_t *hostconf,
-                                 h2o_access_log_filehandle_t *log_handle);
+                                 h2o_access_log_filehandle_t *log_handle,
+                                 request_handler_data_t *data);
 
 #endif // FORTUNE_H_

+ 7 - 4
frameworks/C/h2o/src/handlers/request_handler_data.h

@@ -21,18 +21,21 @@
 
 #define REQUEST_HANDLER_DATA_H_
 
-#include "cache.h"
+#include <h2o/cache.h>
+
+#include "database.h"
+#include "list.h"
 
 struct mustache_token_t;
 
 typedef struct {
 	struct mustache_token_t *fortunes_template;
-	cache_t world_cache;
+	list_t *prepared_statements;
 } request_handler_data_t;
 
 typedef struct {
-	// Replace with any actual fields; structures without members cause compiler warnings.
-	int pad;
+	h2o_cache_t *world_cache;
+	db_conn_pool_t hello_world_db;
 } request_handler_thread_data_t;
 
 #endif // REQUEST_HANDLER_DATA_H_

+ 110 - 89
frameworks/C/h2o/src/handlers/world.c

@@ -33,7 +33,6 @@
 #include <yajl/yajl_gen.h>
 
 #include "bitset.h"
-#include "cache.h"
 #include "database.h"
 #include "error.h"
 #include "global_data.h"
@@ -76,8 +75,10 @@ typedef struct multiple_query_ctx_t multiple_query_ctx_t;
 typedef struct update_ctx_t update_ctx_t;
 
 typedef struct {
-	thread_context_t *ctx;
+	request_handler_thread_data_t *data;
+	h2o_loop_t *loop;
 	db_query_param_t param;
+	h2o_iovec_t table;
 } populate_cache_ctx_t;
 
 typedef struct {
@@ -125,7 +126,9 @@ static int compare_items(const void *x, const void *y);
 static void complete_multiple_query(multiple_query_ctx_t *query_ctx);
 static int do_multiple_queries(bool do_update, bool use_cache, h2o_req_t *req);
 static void do_updates(multiple_query_ctx_t *query_ctx);
-static void fetch_from_cache(uint64_t now, cache_t *cache, multiple_query_ctx_t *query_ctx);
+static void fetch_from_cache(uint64_t now,
+                             request_handler_thread_data_t *data,
+                             multiple_query_ctx_t *query_ctx);
 static void free_cache_entry(h2o_iovec_t value);
 static size_t get_query_number(h2o_req_t *req);
 static void initialize_ids(size_t num_query, query_result_t *res, unsigned int *seed);
@@ -140,7 +143,6 @@ static void on_single_query_error(db_query_param_t *param, const char *error_str
 static result_return_t on_single_query_result(db_query_param_t *param, PGresult *result);
 static void on_single_query_timeout(db_query_param_t *param);
 static result_return_t on_update_result(db_query_param_t *param, PGresult *result);
-static void populate_cache(thread_context_t *ctx, void *arg);
 static void process_result(PGresult *result, size_t idx, query_result_t *out);
 static int serialize_item(uint32_t id, uint32_t random_number, yajl_gen gen);
 static void serialize_items(const query_result_t *res,
@@ -163,7 +165,7 @@ static void cleanup_multiple_query(multiple_query_ctx_t *query_ctx)
 		free_json_generator(query_ctx->gen,
 		                    &query_ctx->ctx->json_generator,
 		                    &query_ctx->ctx->json_generator_num,
-		                    query_ctx->ctx->config->max_json_generator);
+		                    query_ctx->ctx->global_thread_data->config->max_json_generator);
 
 	free(query_ctx);
 }
@@ -243,7 +245,9 @@ static int do_multiple_queries(bool do_update, bool use_cache, h2o_req_t *req)
 	base_size = ((base_size + _Alignof(query_param_t) - 1) / _Alignof(query_param_t));
 	base_size = base_size * _Alignof(query_param_t);
 
-	const size_t num_query_in_progress = MIN(num_query, ctx->config->max_db_conn_num);
+	const config_t * const config = ctx->global_thread_data->config;
+	const size_t num_query_in_progress =
+		MIN(num_query, config->max_db_conn_num * config->max_pipeline_query_num);
 	size_t sz = base_size + num_query_in_progress * sizeof(query_param_t);
 
 	if (do_update) {
@@ -273,7 +277,7 @@ static int do_multiple_queries(bool do_update, bool use_cache, h2o_req_t *req)
 	if (use_cache) {
 		query_ctx->flags |= USE_CACHE;
 		fetch_from_cache(h2o_now(ctx->event_loop.h2o_ctx.loop),
-		                 &ctx->global_data->request_handler_data.world_cache,
+		                 &ctx->request_handler_data,
 		                 query_ctx);
 
 		if (query_ctx->num_result == query_ctx->num_query) {
@@ -307,7 +311,8 @@ static int do_multiple_queries(bool do_update, bool use_cache, h2o_req_t *req)
 	}
 
 	for (size_t i = 0; i < query_ctx->num_query_in_progress; i++)
-		if (execute_query(ctx, &query_ctx->query_param[i].param)) {
+		if (execute_database_query(&ctx->request_handler_data.hello_world_db,
+		                           &query_ctx->query_param[i].param)) {
 			query_ctx->num_query_in_progress = i;
 			query_ctx->flags |= DO_CLEANUP;
 			send_service_unavailable_error(DB_REQ_ERROR, req);
@@ -366,7 +371,8 @@ static void do_updates(multiple_query_ctx_t *query_ctx)
 	if ((size_t) c >= sz)
 		goto error;
 
-	if (execute_query(query_ctx->ctx, &query_ctx->query_param->param)) {
+	if (execute_database_query(&query_ctx->ctx->request_handler_data.hello_world_db,
+	                           &query_ctx->query_param->param)) {
 		query_ctx->flags |= DO_CLEANUP;
 		send_service_unavailable_error(DB_REQ_ERROR, query_ctx->req);
 	}
@@ -380,22 +386,49 @@ error:
 	send_error(INTERNAL_SERVER_ERROR, REQ_ERROR, query_ctx->req);
 }
 
-static void fetch_from_cache(uint64_t now, cache_t *cache, multiple_query_ctx_t *query_ctx)
+static void fetch_from_cache(uint64_t now,
+                             request_handler_thread_data_t *data,
+                             multiple_query_ctx_t *query_ctx)
 {
-	h2o_iovec_t key = {.len = sizeof(query_ctx->res->id)};
+	if (data->world_cache) {
+		const h2o_iovec_t key = {.base = WORLD_TABLE_NAME, .len = sizeof(WORLD_TABLE_NAME) - 1};
+		h2o_cache_ref_t * const r = h2o_cache_fetch(data->world_cache, now, key, 0);
 
-	for (size_t i = 0; i < query_ctx->num_query; i++) {
-		key.base = (char *) &query_ctx->res[i].id;
+		if (r) {
+			const uint32_t * const table = (const uint32_t *) r->value.base;
 
-		const h2o_cache_hashcode_t keyhash = h2o_cache_calchash(key.base, key.len);
-		h2o_cache_ref_t * const r = cache_fetch(cache, now, key, keyhash);
+			for (size_t i = 0; i < query_ctx->num_query; i++) {
+				const uint32_t id = query_ctx->res[i].id;
 
-		if (r) {
-			query_ctx->res[i].id = query_ctx->res[query_ctx->num_result].id;
-			memcpy(query_ctx->res + query_ctx->num_result++,
-			       r->value.base,
-			       sizeof(*query_ctx->res));
-			cache_release(cache, r, keyhash);
+				memset(query_ctx->res + i, 0, sizeof(*query_ctx->res));
+				query_ctx->res[i].id = id;
+				assert(id <= MAX_ID);
+				query_ctx->res[i].random_number = table[id];
+			}
+
+			query_ctx->num_result = query_ctx->num_query;
+			h2o_cache_release(data->world_cache, r);
+		}
+	}
+	else {
+		data->world_cache = h2o_cache_create(0, CACHE_CAPACITY, CACHE_DURATION, free_cache_entry);
+
+		if (data->world_cache) {
+			populate_cache_ctx_t * const ctx = h2o_mem_alloc(sizeof(*ctx));
+
+			memset(ctx, 0, sizeof(*ctx));
+			ctx->data = data;
+			ctx->loop = query_ctx->ctx->event_loop.h2o_ctx.loop;
+			ctx->table.len = (MAX_ID + 1) * sizeof(uint32_t);
+			ctx->table.base = h2o_mem_alloc(ctx->table.len);
+			memset(ctx->table.base, 0, ctx->table.len);
+			ctx->param.command = POPULATE_CACHE_QUERY;
+			ctx->param.on_error = on_populate_cache_error;
+			ctx->param.on_result = on_populate_cache_result;
+			ctx->param.on_timeout = on_populate_cache_timeout;
+
+			if (execute_database_query(&data->hello_world_db, &ctx->param))
+				on_populate_cache_error(&ctx->param, NULL);
 		}
 	}
 }
@@ -482,20 +515,6 @@ static result_return_t on_multiple_query_result(db_query_param_t *param, PGresul
 	else if (PQresultStatus(result) == PGRES_TUPLES_OK) {
 		assert(PQntuples(result) == 1);
 		process_result(result, 0, query_ctx->res + query_ctx->num_result);
-
-		if (query_ctx->flags & USE_CACHE) {
-			query_result_t * const r = h2o_mem_alloc(sizeof(*r));
-			const h2o_iovec_t key = {.base = (char *) &r->id, .len = sizeof(r->id)};
-			const h2o_iovec_t value = {.base = (char *) r, .len = sizeof(*r)};
-
-			*r = query_ctx->res[query_ctx->num_result];
-			cache_set(h2o_now(query_ctx->ctx->event_loop.h2o_ctx.loop),
-			          key,
-			          0,
-			          value,
-			          &query_ctx->ctx->global_data->request_handler_data.world_cache);
-		}
-
 		query_ctx->num_result++;
 
 		const size_t num_query_remaining = query_ctx->num_query - query_ctx->num_result;
@@ -505,7 +524,8 @@ static result_return_t on_multiple_query_result(db_query_param_t *param, PGresul
 
 			query_param->id = htonl(query_ctx->res[idx].id);
 
-			if (execute_query(query_ctx->ctx, &query_param->param)) {
+			if (execute_database_query(&query_ctx->ctx->request_handler_data.hello_world_db,
+			                           &query_param->param)) {
 				query_ctx->flags |= DO_CLEANUP;
 				send_service_unavailable_error(DB_REQ_ERROR, query_ctx->req);
 			}
@@ -545,45 +565,53 @@ static void on_multiple_query_timeout(db_query_param_t *param)
 static void on_populate_cache_error(db_query_param_t *param, const char *error_string)
 {
 	IGNORE_FUNCTION_PARAMETER(error_string);
-	free(H2O_STRUCT_FROM_MEMBER(populate_cache_ctx_t, param, param));
-}
 
-static result_return_t on_populate_cache_result(db_query_param_t *param, PGresult *result)
-{
 	populate_cache_ctx_t * const query_ctx = H2O_STRUCT_FROM_MEMBER(populate_cache_ctx_t,
 	                                                                param,
 	                                                                param);
 
+	h2o_cache_destroy(query_ctx->data->world_cache);
+	query_ctx->data->world_cache = NULL;
+	free(query_ctx->table.base);
+	free(query_ctx);
+}
+
+static result_return_t on_populate_cache_result(db_query_param_t *param, PGresult *result)
+{
 	if (PQresultStatus(result) == PGRES_TUPLES_OK) {
 		const size_t num_rows = PQntuples(result);
+		populate_cache_ctx_t * const query_ctx = H2O_STRUCT_FROM_MEMBER(populate_cache_ctx_t,
+		                                                                param,
+		                                                                param);
+		query_result_t r = {.id = 0};
+		uint32_t * const table = (uint32_t *) query_ctx->table.base;
 
 		for (size_t i = 0; i < num_rows; i++) {
-			query_result_t * const r = h2o_mem_alloc(sizeof(*r));
-
-			memset(r, 0, sizeof(*r));
-			process_result(result, i, r);
+			process_result(result, i, &r);
+			table[r.id] = r.random_number;
+		}
 
-			const h2o_iovec_t key = {.base = (char *) &r->id, .len = sizeof(r->id)};
-			const h2o_iovec_t value = {.base = (char *) r, .len = sizeof(*r)};
+		const h2o_iovec_t key = {.base = WORLD_TABLE_NAME, .len = sizeof(WORLD_TABLE_NAME) - 1};
 
-			cache_set(h2o_now(query_ctx->ctx->event_loop.h2o_ctx.loop),
-			          key,
-			          0,
-			          value,
-			          &query_ctx->ctx->global_data->request_handler_data.world_cache);
-		}
+		h2o_cache_set(query_ctx->data->world_cache,
+		              h2o_now(query_ctx->loop),
+		              key,
+		              0,
+		              query_ctx->table);
+		free(query_ctx);
 	}
-	else
+	else {
 		LIBRARY_ERROR("PQresultStatus", PQresultErrorMessage(result));
+		on_populate_cache_error(param, NULL);
+	}
 
 	PQclear(result);
-	free(query_ctx);
 	return DONE;
 }
 
 static void on_populate_cache_timeout(db_query_param_t *param)
 {
-	free(H2O_STRUCT_FROM_MEMBER(populate_cache_ctx_t, param, param));
+	on_populate_cache_error(param, NULL);
 }
 
 static void on_single_query_error(db_query_param_t *param, const char *error_string)
@@ -690,23 +718,6 @@ static result_return_t on_update_result(db_query_param_t *param, PGresult *resul
 	return DONE;
 }
 
-static void populate_cache(thread_context_t *ctx, void *arg)
-{
-	IGNORE_FUNCTION_PARAMETER(arg);
-
-	populate_cache_ctx_t * const query_ctx = h2o_mem_alloc(sizeof(*query_ctx));
-
-	memset(query_ctx, 0, sizeof(*query_ctx));
-	query_ctx->ctx = ctx;
-	query_ctx->param.command = POPULATE_CACHE_QUERY;
-	query_ctx->param.on_error = on_populate_cache_error;
-	query_ctx->param.on_result = on_populate_cache_result;
-	query_ctx->param.on_timeout = on_populate_cache_timeout;
-
-	if (execute_query(ctx, &query_ctx->param))
-		free(query_ctx);
-}
-
 static void process_result(PGresult *result, size_t idx, query_result_t *out)
 {
 	assert(PQnfields(result) == 2);
@@ -811,7 +822,7 @@ static int single_query(struct st_h2o_handler_t *self, h2o_req_t *req)
 	query_ctx->param.resultFormat = 1;
 	query_ctx->req = req;
 
-	if (execute_query(ctx, &query_ctx->param)) {
+	if (execute_database_query(&ctx->request_handler_data.hello_world_db, &query_ctx->param)) {
 		query_ctx->cleanup = true;
 		send_service_unavailable_error(DB_REQ_ERROR, req);
 	}
@@ -825,27 +836,37 @@ static int updates(struct st_h2o_handler_t *self, h2o_req_t *req)
 	return do_multiple_queries(true, false, req);
 }
 
-void cleanup_world_handlers(global_data_t *global_data)
+void cleanup_world_handler_thread_data(request_handler_thread_data_t *data)
+{
+	free_database_connection_pool(&data->hello_world_db);
+
+	if (data->world_cache)
+		h2o_cache_destroy(data->world_cache);
+}
+
+void cleanup_world_handlers(request_handler_data_t *data)
 {
-	cache_destroy(&global_data->request_handler_data.world_cache);
+	remove_prepared_statements(data->prepared_statements);
 }
 
-void initialize_world_handlers(const config_t *config,
-                               global_data_t *global_data,
-                               h2o_hostconf_t *hostconf,
-                               h2o_access_log_filehandle_t *log_handle)
+void initialize_world_handler_thread_data(thread_context_t *ctx,
+                                          const request_handler_data_t *data,
+                                          request_handler_thread_data_t *thread_data)
 {
-	add_prepared_statement(WORLD_TABLE_NAME, WORLD_QUERY, &global_data->prepared_statements);
+	initialize_database_connection_pool(ctx->global_thread_data->config->db_host,
+	                                    ctx->global_thread_data->config,
+	                                    data->prepared_statements,
+	                                    ctx->event_loop.h2o_ctx.loop,
+	                                    &thread_data->hello_world_db);
+}
+
+void initialize_world_handlers(h2o_hostconf_t *hostconf,
+                               h2o_access_log_filehandle_t *log_handle,
+                               request_handler_data_t *data)
+{
+	add_prepared_statement(WORLD_TABLE_NAME, WORLD_QUERY, &data->prepared_statements);
+	register_request_handler("/cached-worlds", cached_queries, hostconf, log_handle);
 	register_request_handler("/db", single_query, hostconf, log_handle);
 	register_request_handler("/queries", multiple_queries, hostconf, log_handle);
 	register_request_handler("/updates", updates, hostconf, log_handle);
-
-	if (!cache_create(config->thread_num,
-	                  CACHE_CAPACITY,
-	                  CACHE_DURATION,
-	                  free_cache_entry,
-	                  &global_data->request_handler_data.world_cache)) {
-		add_postinitialization_task(populate_cache, NULL, &global_data->postinitialization_tasks);
-		register_request_handler("/cached-worlds", cached_queries, hostconf, log_handle);
-	}
 }

+ 12 - 6
frameworks/C/h2o/src/handlers/world.h

@@ -24,11 +24,17 @@
 #include <h2o.h>
 
 #include "global_data.h"
-
-void cleanup_world_handlers(global_data_t *global_data);
-void initialize_world_handlers(const config_t *config,
-                               global_data_t *global_data,
-                               h2o_hostconf_t *hostconf,
-                               h2o_access_log_filehandle_t *log_handle);
+#include "list.h"
+#include "request_handler_data.h"
+#include "thread.h"
+
+void cleanup_world_handler_thread_data(request_handler_thread_data_t *data);
+void cleanup_world_handlers(request_handler_data_t *data);
+void initialize_world_handler_thread_data(thread_context_t *ctx,
+                                          const request_handler_data_t *data,
+                                          request_handler_thread_data_t *thread_data);
+void initialize_world_handlers(h2o_hostconf_t *hostconf,
+                               h2o_access_log_filehandle_t *log_handle,
+                               request_handler_data_t *data);
 
 #endif // WORLD_H_

+ 39 - 13
frameworks/C/h2o/src/main.c

@@ -41,12 +41,23 @@
 #include "utility.h"
 
 #define USAGE_MESSAGE \
-	"Usage:\n%s [-a <max connections accepted simultaneously>] [-b <bind address>] " \
-	"[-c <certificate file>] [-d <database connection string>] [-f template file path] " \
-	"[-j <max reused JSON generators>] [-k <private key file>] [-l <log path>] " \
-	"[-m <max database connections per thread>] [-p <port>] " \
-	"[-q <max enqueued database queries per thread>] [-r <root directory>] " \
-	"[-s <HTTPS port>] [-t <thread number>]\n"
+	"Usage:\n%s " \
+	"[-a <max connections accepted simultaneously>] " \
+	"[-b <bind address>] " \
+	"[-c <certificate file>] " \
+	"[-d <database connection string>] " \
+	"[-e <max pipelined database queries>] " \
+	"[-f <template file path>] " \
+	"[-j <max reused JSON generators>] " \
+	"[-k <private key file>] " \
+	"[-l <log path>] " \
+	"[-m <max database connections per thread>] " \
+	"[-o <database query timeout in seconds>] " \
+	"[-p <port>] " \
+	"[-q <max enqueued database queries per thread>] " \
+	"[-r <root directory>] " \
+	"[-s <HTTPS port>] " \
+	"[-t <thread number>]\n"
 
 typedef struct {
 	list_t l;
@@ -73,8 +84,7 @@ static void free_global_data(global_data_t *global_data)
 	if (global_data->file_logger)
 		global_data->file_logger->dispose(global_data->file_logger);
 
-	cleanup_request_handlers(global_data);
-	remove_prepared_statements(global_data->prepared_statements);
+	cleanup_request_handlers(&global_data->request_handler_data);
 	h2o_config_dispose(&global_data->h2o_config);
 
 	if (global_data->ssl_ctx)
@@ -111,7 +121,11 @@ static int initialize_global_data(const config_t *config, global_data_t *global_
 			goto error;
 	}
 
-	initialize_request_handlers(config, global_data, hostconf, log_handle);
+	initialize_request_handlers(config,
+	                            hostconf,
+	                            log_handle,
+	                            &global_data->postinitialization_tasks,
+	                            &global_data->request_handler_data);
 
 	// Must be registered after the rest of the request handlers.
 	if (config->root) {
@@ -145,7 +159,7 @@ static int parse_options(int argc, char *argv[], config_t *config)
 	opterr = 0;
 
 	while (1) {
-		const int opt = getopt(argc, argv, "?a:b:c:d:f:j:k:l:m:p:q:r:s:t:");
+		const int opt = getopt(argc, argv, "?a:b:c:d:e:f:j:k:l:m:o:p:q:r:s:t:");
 
 		if (opt == -1)
 			break;
@@ -178,6 +192,9 @@ static int parse_options(int argc, char *argv[], config_t *config)
 			case 'd':
 				config->db_host = optarg;
 				break;
+			case 'e':
+				PARSE_NUMBER(config->max_pipeline_query_num);
+				break;
 			case 'f':
 				config->template_path = optarg;
 				break;
@@ -193,6 +210,9 @@ static int parse_options(int argc, char *argv[], config_t *config)
 			case 'm':
 				PARSE_NUMBER(config->max_db_conn_num);
 				break;
+			case 'o':
+				PARSE_NUMBER(config->db_timeout);
+				break;
 			case 'p':
 				PARSE_NUMBER(config->port);
 				break;
@@ -234,12 +254,21 @@ static void run_postinitialization_tasks(list_t **tasks, thread_context_t *ctx)
 
 static void set_default_options(config_t *config)
 {
+	if (!config->db_timeout)
+		config->db_timeout = 10;
+
+	if (!config->https_port)
+		config->https_port = 4443;
+
 	if (!config->max_accept)
 		config->max_accept = 10;
 
 	if (!config->max_db_conn_num)
 		config->max_db_conn_num = 10;
 
+	if (!config->max_pipeline_query_num)
+		config->max_pipeline_query_num = 16;
+
 	if (!config->max_query_num)
 		config->max_query_num = 10000;
 
@@ -248,9 +277,6 @@ static void set_default_options(config_t *config)
 
 	if (!config->thread_num)
 		config->thread_num = h2o_numproc();
-
-	if (!config->https_port)
-		config->https_port = 4443;
 }
 
 static void setup_process(void)
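
The new -e (max pipelined database queries) and -o (database query timeout in seconds) flags follow the same pattern as the existing numeric options: parse the argument in parse_options(), then let set_default_options() fill in a default when the value is still zero. A minimal, self-contained sketch of that pattern, using a hypothetical struct options rather than the project's config_t:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct options {
        long long max_pipeline_query_num;
        long long db_timeout;
    };

    /* Parse a strictly positive decimal number (roughly what a PARSE_NUMBER
     * macro would presumably do). */
    static int parse_number(const char *arg, long long *out)
    {
        char *end;
        const long long value = strtoll(arg, &end, 10);

        if (*end || value <= 0)
            return 1;

        *out = value;
        return 0;
    }

    int main(int argc, char *argv[])
    {
        struct options opts = {0};
        int c;

        while ((c = getopt(argc, argv, "e:o:")) != -1)
            switch (c) {
                case 'e':
                    if (parse_number(optarg, &opts.max_pipeline_query_num))
                        return EXIT_FAILURE;
                    break;
                case 'o':
                    if (parse_number(optarg, &opts.db_timeout))
                        return EXIT_FAILURE;
                    break;
                default:
                    fprintf(stderr, "usage: %s [-e <num>] [-o <seconds>]\n", argv[0]);
                    return EXIT_FAILURE;
            }

        /* Unset options fall back to defaults, mirroring set_default_options(). */
        if (!opts.max_pipeline_query_num)
            opts.max_pipeline_query_num = 16;

        if (!opts.db_timeout)
            opts.db_timeout = 10;

        printf("max pipelined queries: %lld, query timeout: %lld s\n",
               opts.max_pipeline_query_num, opts.db_timeout);
        return 0;
    }

When the flags are omitted, the defaults above match the ones added to set_default_options(): 16 pipelined queries and a 10-second query timeout.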

+ 18 - 14
frameworks/C/h2o/src/request_handler.c

@@ -29,6 +29,7 @@
 #include "handlers/fortune.h"
 #include "handlers/json_serializer.h"
 #include "handlers/plaintext.h"
+#include "handlers/request_handler_data.h"
 #include "handlers/world.h"
 
 static const char *status_code_to_string(http_status_code_t status_code)
@@ -58,15 +59,15 @@ static const char *status_code_to_string(http_status_code_t status_code)
 	return ret;
 }
 
-void cleanup_request_handlers(global_data_t *global_data)
+void cleanup_request_handler_thread_data(request_handler_thread_data_t *data)
 {
-	cleanup_fortunes_handler(global_data);
-	cleanup_world_handlers(global_data);
+	cleanup_world_handler_thread_data(data);
 }
 
-void free_request_handler_thread_data(request_handler_thread_data_t *request_handler_thread_data)
+void cleanup_request_handlers(request_handler_data_t *data)
 {
-	IGNORE_FUNCTION_PARAMETER(request_handler_thread_data);
+	cleanup_fortunes_handler(data);
+	cleanup_world_handlers(data);
 }
 
 const char *get_query_param(const char *query,
@@ -94,22 +95,25 @@ const char *get_query_param(const char *query,
 	return ret;
 }
 
-void initialize_request_handler_thread_data(
-		const config_t *config, request_handler_thread_data_t *request_handler_thread_data)
+void initialize_request_handler_thread_data(thread_context_t *ctx)
 {
-	IGNORE_FUNCTION_PARAMETER(config);
-	IGNORE_FUNCTION_PARAMETER(request_handler_thread_data);
+	const request_handler_data_t * const data =
+		&ctx->global_thread_data->global_data->request_handler_data;
+
+	initialize_world_handler_thread_data(ctx, data, &ctx->request_handler_data);
 }
 
 void initialize_request_handlers(const config_t *config,
-                                 global_data_t *global_data,
                                  h2o_hostconf_t *hostconf,
-                                 h2o_access_log_filehandle_t *log_handle)
+                                 h2o_access_log_filehandle_t *log_handle,
+                                 list_t **postinitialization_tasks,
+                                 request_handler_data_t *data)
 {
-	initialize_fortunes_handler(config, global_data, hostconf, log_handle);
+	IGNORE_FUNCTION_PARAMETER(postinitialization_tasks);
+	initialize_fortunes_handler(config, hostconf, log_handle, data);
 	initialize_json_serializer_handler(hostconf, log_handle);
 	initialize_plaintext_handler(hostconf, log_handle);
-	initialize_world_handlers(config, global_data, hostconf, log_handle);
+	initialize_world_handlers(hostconf, log_handle, data);
 }
 
 void register_request_handler(const char *path,
@@ -151,7 +155,7 @@ int send_json_response(json_generator_t *gen, bool free_gen, h2o_req_t *req)
 			free_json_generator(gen,
 			                    &ctx->json_generator,
 			                    &ctx->json_generator_num,
-			                    ctx->config->max_json_generator);
+			                    ctx->global_thread_data->config->max_json_generator);
 		}
 		else {
 			h2o_generator_t generator;
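
The world and fortune handlers read the "queries" parameter from the raw query string through get_query_param(), whose declaration in request_handler.h is unchanged. Purely as an illustration of that kind of lookup (a hypothetical helper, not the project's implementation), a parameter scan over an unparsed query string can be written like this:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper: return a pointer to the value of "param=" inside
     * query[0 .. query_len), or NULL when the parameter is absent. */
    static const char *find_query_param(const char *query,
                                        size_t query_len,
                                        const char *param)
    {
        const size_t param_len = strlen(param);
        size_t i = 0;

        while (i + param_len + 1 <= query_len) {
            if (!memcmp(query + i, param, param_len) && query[i + param_len] == '=')
                return query + i + param_len + 1;

            /* Jump to the character following the next '&'. */
            while (i < query_len && query[i] != '&')
                i++;

            i++;
        }

        return NULL;
    }

    int main(void)
    {
        const char query[] = "foo=1&queries=20";
        const char *value = find_query_param(query, sizeof(query) - 1, "queries");

        printf("queries -> %s\n", value ? value : "(not found)");
        return 0;
    }

The returned pointer is not NUL-terminated at the value boundary; a numeric parser such as strtoul() stops at the following '&' anyway, which is why returning a pointer into the original buffer is enough for this style of lookup.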

+ 9 - 6
frameworks/C/h2o/src/request_handler.h

@@ -25,7 +25,10 @@
 #include <stdbool.h>
 
 #include "global_data.h"
+#include "list.h"
+#include "thread.h"
 #include "utility.h"
+#include "handlers/request_handler_data.h"
 
 #define REQ_ERROR "request error\n"
 
@@ -43,18 +46,18 @@ typedef enum {
 	GATEWAY_TIMEOUT = 504
 } http_status_code_t;
 
-void cleanup_request_handlers(global_data_t *global_data);
-void free_request_handler_thread_data(request_handler_thread_data_t *request_handler_thread_data);
+void cleanup_request_handler_thread_data(request_handler_thread_data_t *data);
+void cleanup_request_handlers(request_handler_data_t *data);
 const char *get_query_param(const char *query,
                             size_t query_len,
                             const char *param,
                             size_t param_len);
-void initialize_request_handler_thread_data(
-		const config_t *config, request_handler_thread_data_t *request_handler_thread_data);
+void initialize_request_handler_thread_data(thread_context_t *ctx);
 void initialize_request_handlers(const config_t *config,
-                                 global_data_t *global_data,
                                  h2o_hostconf_t *hostconf,
-                                 h2o_access_log_filehandle_t *log_handle);
+                                 h2o_access_log_filehandle_t *log_handle,
+                                 list_t **postinitialization_tasks,
+                                 request_handler_data_t *data);
 void register_request_handler(const char *path,
                               int (*handler)(struct st_h2o_handler_t *, h2o_req_t *),
                               h2o_hostconf_t *hostconf,

+ 3 - 8
frameworks/C/h2o/src/thread.c

@@ -31,7 +31,6 @@
 #include <h2o/serverutil.h>
 #include <sys/syscall.h>
 
-#include "database.h"
 #include "error.h"
 #include "event_loop.h"
 #include "global_data.h"
@@ -46,7 +45,7 @@ static void *run_thread(void *arg)
 	thread_context_t ctx;
 
 	initialize_thread_context(arg, false, &ctx);
-	set_thread_memory_allocation_policy(ctx.config->thread_num);
+	set_thread_memory_allocation_policy(ctx.global_thread_data->config->thread_num);
 	event_loop(&ctx);
 	free_thread_context(&ctx);
 	pthread_exit(NULL);
@@ -87,9 +86,8 @@ static void set_thread_memory_allocation_policy(size_t thread_num)
 
 void free_thread_context(thread_context_t *ctx)
 {
-	free_database_state(ctx->event_loop.h2o_ctx.loop, &ctx->db_state);
+	cleanup_request_handler_thread_data(&ctx->request_handler_data);
 	free_event_loop(&ctx->event_loop, &ctx->global_thread_data->h2o_receiver);
-	free_request_handler_thread_data(&ctx->request_handler_data);
 
 	if (ctx->json_generator)
 		do {
@@ -129,16 +127,13 @@ void initialize_thread_context(global_thread_data_t *global_thread_data,
                                thread_context_t *ctx)
 {
 	memset(ctx, 0, sizeof(*ctx));
-	ctx->config = global_thread_data->config;
-	ctx->global_data = global_thread_data->global_data;
 	ctx->global_thread_data = global_thread_data;
 	ctx->random_seed = syscall(SYS_gettid);
 	initialize_event_loop(is_main_thread,
 	                      global_thread_data->global_data,
 	                      &global_thread_data->h2o_receiver,
 	                      &ctx->event_loop);
-	initialize_database_state(ctx->event_loop.h2o_ctx.loop, &ctx->db_state);
-	initialize_request_handler_thread_data(ctx->config, &ctx->request_handler_data);
+	initialize_request_handler_thread_data(ctx);
 	global_thread_data->ctx = ctx;
 }
 

+ 4 - 10
frameworks/C/h2o/src/thread.h

@@ -25,36 +25,30 @@
 #include <pthread.h>
 #include <stdbool.h>
 
-#include "database.h"
 #include "event_loop.h"
 #include "global_data.h"
 #include "list.h"
 #include "handlers/request_handler_data.h"
 
-typedef struct thread_context_t thread_context_t;
+struct thread_context_t;
 
 typedef struct global_thread_data_t {
 	const config_t *config;
-	thread_context_t *ctx;
+	struct thread_context_t *ctx;
 	global_data_t *global_data;
 	h2o_multithread_receiver_t h2o_receiver;
 	pthread_t thread;
 } global_thread_data_t;
 
-struct thread_context_t {
-	const config_t *config;
-	global_data_t *global_data;
-	// global_thread_data contains config and global_data as well,
-	// but keep copies here to avoid some pointer chasing.
+typedef struct thread_context_t {
 	global_thread_data_t *global_thread_data;
 	list_t *json_generator;
 	size_t json_generator_num;
 	unsigned random_seed;
 	bool shutdown;
-	db_state_t db_state;
 	event_loop_t event_loop;
 	request_handler_thread_data_t request_handler_data;
-};
+} thread_context_t;
 
 void free_thread_context(thread_context_t *ctx);
 global_thread_data_t *initialize_global_thread_data(const config_t *config,
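
In thread.h, struct thread_context_t is now forward-declared and the typedef moved onto the definition itself; a forward declaration is all that pointer members such as global_thread_data_t::ctx need. A small, self-contained illustration of the idiom (hypothetical fields, not the real structs):

    #include <stdlib.h>

    struct thread_context_t; /* incomplete type: sufficient for pointers */

    typedef struct {
        struct thread_context_t *ctx; /* no full definition needed here */
    } global_thread_data_t;

    /* The complete definition (and the typedef) follows later, as in thread.h. */
    typedef struct thread_context_t {
        global_thread_data_t *global_thread_data;
        unsigned random_seed;
    } thread_context_t;

    int main(void)
    {
        thread_context_t ctx = {0};
        global_thread_data_t gtd = {&ctx};

        ctx.global_thread_data = &gtd;
        return gtd.ctx == &ctx ? EXIT_SUCCESS : EXIT_FAILURE;
    }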

+ 1 - 1
frameworks/C/h2o/src/utility.c

@@ -32,7 +32,7 @@
 #include "list.h"
 #include "utility.h"
 
-#define DEFAULT_CACHE_LINE_SIZE 128
+#define DEFAULT_CACHE_LINE_SIZE 256
 
 static list_t *get_sorted_sublist(list_t *head, int (*compare)(const list_t *, const list_t *));
 static list_t *merge_lists(list_t *head1,
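
DEFAULT_CACHE_LINE_SIZE grows from 128 to 256 bytes. As a generic aside (a sketch of the usual purpose of such a constant, not code taken from utility.c), a cache-line-size define is typically used to align or pad per-thread data so that neighbouring slots never share a line, avoiding false sharing; a larger value also stays safe on CPUs with bigger effective line sizes:

    #include <stdalign.h>
    #include <stdio.h>

    #define DEFAULT_CACHE_LINE_SIZE 256

    /* One counter per thread, padded out to a full (assumed) cache line. */
    typedef struct {
        alignas(DEFAULT_CACHE_LINE_SIZE) unsigned long long counter;
    } padded_counter_t;

    int main(void)
    {
        static padded_counter_t per_thread[4];

        /* sizeof() is rounded up to the alignment, so each array element
         * starts on its own 256-byte boundary. */
        printf("sizeof(padded_counter_t) = %zu\n", sizeof(padded_counter_t));
        printf("stride between slots    = %zu\n",
               (size_t) ((char *) &per_thread[1] - (char *) &per_thread[0]));
        return 0;
    }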

+ 10 - 10
frameworks/CSharp/fastendpoints/Benchmarks/Benchmarks.csproj

@@ -1,14 +1,14 @@
 <Project Sdk="Microsoft.NET.Sdk.Web">
 
-    <PropertyGroup>
-        <TargetFramework>net6.0</TargetFramework>
-        <Nullable>enable</Nullable>
-        <ImplicitUsings>enable</ImplicitUsings>
-        <NoWarn>CA2016;IDE1006</NoWarn>
-    </PropertyGroup>
+  <PropertyGroup>
+    <TargetFramework>net7.0</TargetFramework>
+    <Nullable>enable</Nullable>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <NoWarn>CA2016;IDE1006</NoWarn>
+  </PropertyGroup>
 
-    <ItemGroup>
-        <PackageReference Include="FastEndpoints" Version="3.*" />
-    </ItemGroup>
+  <ItemGroup>
+    <PackageReference Include="FastEndpoints" Version="5.*" />
+  </ItemGroup>
 
-</Project>
+</Project>

+ 1 - 1
frameworks/CSharp/fastendpoints/Benchmarks/Endpoints/JsonEndpoint.cs

@@ -1,6 +1,6 @@
 namespace Benchmarks.Endpoints;
 
-public class JsonEndpoint : Endpoint<EmptyRequest, object>
+public sealed class JsonEndpoint : Endpoint<EmptyRequest, object>
 {
     public override void Configure()
     {

+ 1 - 1
frameworks/CSharp/fastendpoints/Benchmarks/Endpoints/PlainTextEndpoint.cs

@@ -1,6 +1,6 @@
 namespace Benchmarks.Endpoints;
 
-public class PlainTextEndpoint : Endpoint<EmptyRequest, EmptyResponse>
+public sealed class PlainTextEndpoint : Endpoint<EmptyRequest, EmptyResponse>
 {
     private static readonly byte[] payload = System.Text.Encoding.UTF8.GetBytes("Hello, World!");
 

+ 2 - 2
frameworks/CSharp/fastendpoints/fastendpoints.dockerfile

@@ -1,9 +1,9 @@
-FROM mcr.microsoft.com/dotnet/sdk:6.0.100 AS build
+FROM mcr.microsoft.com/dotnet/sdk:7.0 AS build
 WORKDIR /app
 COPY Benchmarks .
 RUN dotnet publish -c Release -o out
 
-FROM mcr.microsoft.com/dotnet/aspnet:6.0.0 AS runtime
+FROM mcr.microsoft.com/dotnet/aspnet:7.0 AS runtime
 WORKDIR /app
 COPY --from=build /app/out ./
 

+ 3 - 1
frameworks/Crystal/grip/grip.cr

@@ -120,7 +120,9 @@ class Fortunes < Grip::Controllers::Http
 end
 
 class Application < Grip::Application
-  def routes
+  def initialize
+    super(environment: "production", serve_static: false)
+
     get "/json", Json
     get "/plaintext", Plaintext
     get "/db", Db

+ 2 - 1
frameworks/Crystal/grip/shard.yml

@@ -1,9 +1,10 @@
 name: grip
-version: 0.1.1
+version: 0.2.0
 
 dependencies:
   grip:
     github: grip-framework/grip
+    version: 2.0.0
 
   pg:
     github: will/crystal-pg

+ 18 - 0
frameworks/D/archttp/README.md

@@ -0,0 +1,18 @@
+# Archttp Benchmarking
+A highly performant web framework written in D.
+
+## Requirements
+* LDC > 1.27
+
+## Infrastructure Software Versions
+* [Archttp ~main](https://github.com/kerisy/archttp)
+
+## Test URLs
+
+### Plaintext Test
+
+    http://localhost:1111/plaintext
+
+### JSON Encoding Test
+
+    http://localhost:1111/json

+ 12 - 0
frameworks/D/archttp/archttp.dockerfile

@@ -0,0 +1,12 @@
+FROM dlang2/ldc-ubuntu:latest
+
+ADD ./ /archttp
+WORKDIR /archttp
+
+RUN apt-get update -yqq && apt-get install -yqq zlib1g-dev
+
+RUN dub build -b release --compiler=ldc2 --verbose
+
+EXPOSE 1111
+
+CMD ["./archttp-server"]

+ 24 - 0
frameworks/D/archttp/benchmark_config.json

@@ -0,0 +1,24 @@
+{
+  "framework": "archttp",
+  "tests": [{
+    "default": {
+      "json_url": "/json",
+      "plaintext_url": "/plaintext",
+      "port": 1111,
+      "approach": "Realistic",
+      "classification": "Platform",
+      "database": "None",
+      "framework": "Archttp",
+      "language": "D",
+      "flavor": "LDC",
+      "orm": "Raw",
+      "platform": "Archttp",
+      "webserver": "None",
+      "os": "Linux",
+      "database_os": "Linux",
+      "display_name": "Archttp",
+      "notes": "",
+      "versus": "Archttp"
+  }
+  }]
+}

+ 5 - 5
frameworks/Erlang/chicagoboss/config.toml → frameworks/D/archttp/config.toml

@@ -1,15 +1,15 @@
 [framework]
-name = "chicagoboss"
+name = "archttp"
 
 [main]
 urls.plaintext = "/plaintext"
 urls.json = "/json"
 approach = "Realistic"
-classification = "Fullstack"
+classification = "Platform"
 database = "None"
 database_os = "Linux"
 os = "Linux"
-orm = "raw"
-platform = "Cowboy"
+orm = "Raw"
+platform = "Archttp"
 webserver = "None"
-versus = ""
+versus = "Archttp"

+ 5 - 0
frameworks/D/archttp/dub.sdl

@@ -0,0 +1,5 @@
+name "archttp-server"
+description "Archttp server application."
+authors "[email protected]"
+copyright "Copyright © 2021-2022, kerisy.com"
+dependency "archttp" version="~>1.1.1"

+ 19 - 0
frameworks/D/archttp/source/main.d

@@ -0,0 +1,19 @@
+
+import archttp;
+
+void main()
+{
+    auto app = new Archttp;
+
+    app.get("/plaintext", (req, res) {
+        res.send("Hello, World!");
+    });
+
+    app.get("/json", (req, res) {
+        import std.json;
+
+        res.send( JSONValue( ["message" : "Hello, World!"] ) );
+    });
+
+    app.listen(1111);
+}

+ 2 - 2
frameworks/D/vibed/dub.json

@@ -7,9 +7,9 @@
     "Sönke Ludwig"
   ],
   "dependencies": {
-    "vibe-d": "0.9.4",
+    "vibe-d": "0.9.5",
     "mir-random": "2.2.15",
-    "vibe-d:tls": "0.9.4"
+    "vibe-d:tls": "0.9.5"
   },
   "targetType": "executable",
   "sourcePaths": [],

+ 9 - 8
frameworks/D/vibed/dub.selections.json

@@ -3,23 +3,24 @@
 	"versions": {
 		"botan": "1.12.19",
 		"botan-math": "1.0.3",
-		"derelict-pq": "4.0.0-alpha.2",
+		"derelict-pq": "4.0.0",
 		"derelict-util": "3.0.0-beta.2",
-		"diet-ng": "1.8.0",
+		"diet-ng": "1.8.1",
 		"dpq2": "1.0.17",
-		"eventcore": "0.9.18",
+		"eventcore": "0.9.20",
 		"libasync": "0.8.6",
 		"memutils": "1.0.4",
-		"mir-algorithm": "3.10.91",
-		"mir-core": "1.1.83",
+		"mir-algorithm": "3.14.19",
+		"mir-core": "1.1.111",
 		"mir-linux-kernel": "1.0.1",
 		"mir-random": "2.2.15",
 		"money": "2.3.1",
-		"openssl": "1.1.6+1.0.1g",
+		"openssl": "3.2.2",
+		"silly": "1.1.1",
 		"stdx-allocator": "2.77.5",
 		"taggedalgebraic": "0.11.22",
-		"vibe-core": "1.21.0",
-		"vibe-d": "0.9.4",
+		"vibe-core": "1.22.4",
+		"vibe-d": "0.9.5",
 		"vibe-d-postgresql": "3.1.0-rc.1"
 	}
 }

+ 10 - 3
frameworks/D/vibed/source/mongodb.d

@@ -10,6 +10,7 @@ import mir.random.engine.xorshift : Xorshift;
 
 import std.conv : ConvException, to;
 import std.array;
+import std.exception : enforce;
 
 enum worldSize = 10000;
 
@@ -70,7 +71,9 @@ class WebInterface {
 	{
 		struct Q { int _id; }
 		auto query = Q(_uniformVariable(_gen));
-		auto w = WorldResponse(_worldCollection.findOne!World(query));
+		auto world = _worldCollection.findOne!World(query);
+		enforce(!world.isNull(), "expected world, found none.");
+		auto w = WorldResponse(world.get);
 		res.writeJsonBody(w, HTTPStatus.ok, "application/json");
 	}
 
@@ -91,7 +94,9 @@ class WebInterface {
 		foreach (ref w; data) {
 			static struct Q { int _id; }
 			auto query = Q(_uniformVariable(_gen));
-			w = WorldResponse(_worldCollection.findOne!World(query));
+			auto world = _worldCollection.findOne!World(query);
+			enforce(!world.isNull(), "expected world, found none.");
+			w = WorldResponse(world.get);
 		}
 
 		// write response as JSON
@@ -123,7 +128,9 @@ class WebInterface {
 		foreach (ref w; data) {
 			static struct Q { int _id; }
 			auto query = Q(_uniformVariable(_gen));
-			w = WorldResponse(_worldCollection.findOne!World(query));
+			auto world = _worldCollection.findOne!World(query);
+			enforce(!world.isNull(), "expected world, found none.");
+			w = WorldResponse(world.get);
 
 			// update random number
 			w.randomNumber = _uniformVariable(_gen);

+ 22 - 0
frameworks/Dart/angel3/angel3-mysql.dockerfile

@@ -0,0 +1,22 @@
+FROM dart:2.18.1
+
+COPY ./orm-mysql/config /app/config
+COPY ./orm-mysql/lib /app/lib
+COPY ./orm-mysql/run /app/run
+COPY ./orm-mysql/views /app/views
+COPY ./orm-mysql/web /app/web
+COPY ./orm-mysql/*.yaml /app/
+
+WORKDIR /app
+RUN dart pub upgrade
+
+#RUN chmod -R 777 /app/run
+
+# Optionally build generated sources.
+# RUN pub run build_runner build
+
+# Set environment, start server
+ENV ANGEL_ENV=production
+EXPOSE 8080
+CMD dart ./run/prod.dart -p 8080 -a 0.0.0.0 -j 100
+#CMD dart ./run/dev.dart

+ 2 - 2
frameworks/Dart/angel3/angel3.dockerfile

@@ -1,4 +1,4 @@
-FROM dart:latest
+FROM dart:2.18.1
 
 COPY ./orm/config /app/config
 COPY ./orm/lib /app/lib
@@ -18,4 +18,4 @@ RUN dart pub upgrade
 # Set environment, start server
 ENV ANGEL_ENV=production
 EXPOSE 8080
-CMD dart ./run/prod.dart -p 8080 -a 0.0.0.0 -j 50
+CMD dart ./run/prod.dart -p 8080 -a 0.0.0.0 -j 100

+ 49 - 25
frameworks/Dart/angel3/benchmark_config.json

@@ -1,28 +1,52 @@
 {
   "framework": "angel3",
-  "tests": [{
-     "default": {
-       "json_url": "/json",
-       "plaintext_url": "/plaintext",
-       "db_url": "/db",
-       "query_url": "/query?queries=",
-       "fortune_url": "/fortunes",
-       "update_url": "/updates?queries=",
-       "port": 8080,
-       "approach": "Realistic",
-       "classification": "Fullstack",
-       "database": "Postgres",
-       "framework": "angel3",
-       "language": "Dart",
-       "flavor": "None",
-       "orm": "Micro",
-       "platform": "angel3",
-       "webserver": "None",
-       "os": "Linux",
-       "database_os": "Linux",
-       "display_name": "Angel3",
-       "notes": "",
-       "versus": "None"
-     }
-  }]
+  "tests": [
+    {
+      "default": {
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "db_url": "/db",
+        "query_url": "/query?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/updates?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Fullstack",
+        "database": "Postgres",
+        "framework": "angel3",
+        "language": "Dart",
+        "flavor": "None",
+        "orm": "Micro",
+        "platform": "angel3",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Angel3",
+        "notes": "",
+        "versus": "None"
+      },
+      "mysql": {
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "db_url": "/db",
+        "query_url": "/query?queries=",
+        "fortune_url": "/fortunes",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Fullstack",
+        "database": "MySQL",
+        "framework": "angel3",
+        "language": "Dart",
+        "flavor": "None",
+        "orm": "Micro",
+        "platform": "angel3",
+        "webserver": "None",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Angel3",
+        "notes": "",
+        "versus": "None"
+      }
+    }
+  ]
 }

+ 1 - 0
frameworks/Dart/angel3/orm-mysql/analysis_options.yaml

@@ -0,0 +1 @@
+include: package:lints/recommended.yaml

+ 12 - 0
frameworks/Dart/angel3/orm-mysql/config/default.yaml

@@ -0,0 +1,12 @@
+# Default server configuration.
+host: 127.0.0.1
+port: 8080
+mysql:
+  #host: localhost
+  host: tfb-database
+  port: 3306
+  database_name: hello_world
+  username: benchmarkdbuser
+  password: benchmarkdbpass
+  use_ssl: true
+jwt_secret: INSECURE_DEFAULT_SECRET

+ 2 - 0
frameworks/Dart/angel3/orm-mysql/config/development.yaml

@@ -0,0 +1,2 @@
+# Development-only server configuration.
+debug: true

+ 3 - 0
frameworks/Dart/angel3/orm-mysql/config/production.yaml

@@ -0,0 +1,3 @@
+# Production-only server configuration
+debug: false
+jwt_secret: INSECURE_DEFAULT_SECRET

+ 2 - 0
frameworks/Dart/angel3/orm-mysql/lib/models.dart

@@ -0,0 +1,2 @@
+export 'src/models/fortune.dart';
+export 'src/models/world.dart';

+ 19 - 0
frameworks/Dart/angel3/orm-mysql/lib/orm_mysql_app.dart

@@ -0,0 +1,19 @@
+/// Your very own web application!
+import 'dart:async';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:file/local.dart';
+import 'src/config/config.dart' as configuration;
+import 'src/routes/routes.dart' as routes;
+import 'src/services/services.dart' as services;
+
+/// Configures the server instance.
+Future configureServer(Angel app) async {
+  // Grab a handle to the file system, so that we can do things like
+  // serve static files.
+  var fs = const LocalFileSystem();
+
+  // Set up our application, using the plug-ins defined with this project.
+  await app.configure(configuration.configureServer(fs));
+  await app.configure(services.configureServer);
+  await app.configure(routes.configureServer(fs));
+}

+ 35 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/config/config.dart

@@ -0,0 +1,35 @@
+/// Configuration for this Angel instance.
+import 'package:angel3_configuration/angel3_configuration.dart';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_jael/angel3_jael.dart';
+import 'package:jael3/jael3.dart';
+import 'package:file/file.dart';
+import 'plugins/plugins.dart' as plugins;
+
+/// This is a perfect place to include configuration and load plug-ins.
+AngelConfigurer configureServer(FileSystem fileSystem) {
+  return (Angel app) async {
+    // Load configuration from the `config/` directory.
+    //
+    // See: https://github.com/angel-dart/configuration
+    await app.configure(configuration(fileSystem));
+
+    // Configure our application to render Jael templates from the `views/` directory.
+    //
+    // See: https://github.com/angel-dart/jael
+    var viewsDirectory = fileSystem.directory('views');
+    var viewCache = <String, Document>{};
+    await jaelTemplatePreload(viewsDirectory, viewCache);
+    await app.configure(jael(viewsDirectory, cache: viewCache));
+
+    // Apply other plug-ins, i.e. ones that *you* have written.
+    //
+    // Typically, the plugins in `lib/src/config/plugins/plugins.dart` are plug-ins
+    // that add functionality specific to your application.
+    //
+    // If you write a plug-in that you plan to use again, or are
+    // using one created by the community, include it in
+    // `lib/src/config/config.dart`.
+    await plugins.configureServer(app);
+  };
+}

+ 75 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/config/plugins/orm.dart

@@ -0,0 +1,75 @@
+import 'dart:async';
+import 'dart:io';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_orm/angel3_orm.dart';
+import 'package:angel3_orm_mysql/angel3_orm_mysql.dart';
+import 'package:mysql1/mysql1.dart';
+import 'package:mysql_client/mysql_client.dart';
+
+// For MariaDb
+Future<void> configureServer2(Angel app) async {
+  try {
+    var connection = await connectToMariaDb(app.configuration);
+    var executor = MariaDbExecutor(connection, logger: app.logger);
+
+    app
+      ..container.registerSingleton<QueryExecutor>(executor)
+      ..shutdownHooks.add((_) => connection.close());
+  } catch (e) {
+    app.logger.severe("Failed to connect to MariaDB. ORM disabled.", e);
+  }
+}
+
+// MariaDB connection
+Future<MySqlConnection> connectToMariaDb(Map configuration) async {
+  var mariaDbConfig = configuration['mysql'] as Map? ?? {};
+  var settings = ConnectionSettings(
+      host: mariaDbConfig['host'] as String? ?? 'localhost',
+      port: mariaDbConfig['port'] as int? ?? 3306,
+      db: mariaDbConfig['database_name'] as String? ??
+          Platform.environment['USER'] ??
+          Platform.environment['USERNAME'] ??
+          '',
+      user: mariaDbConfig['username'] as String?,
+      password: mariaDbConfig['password'] as String?,
+      timeout: Duration(
+          seconds: mariaDbConfig['timeout_in_seconds'] as int? ?? 30000),
+      useSSL: mariaDbConfig['use_ssl'] as bool? ?? false);
+
+  var connection = await MySqlConnection.connect(settings);
+  return connection;
+}
+
+// For Mysql
+
+Future<void> configureServer(Angel app) async {
+  try {
+    var connection = await connectToMysql(app.configuration);
+    var executor = MySqlExecutor(connection, logger: app.logger);
+
+    app
+      ..container.registerSingleton<QueryExecutor>(executor)
+      ..shutdownHooks.add((_) => connection.close());
+  } catch (e) {
+    app.logger.severe("Failed to connect to MySQL. ORM disabled.", e);
+  }
+}
+
+// Mysql Connection
+Future<MySQLConnection> connectToMysql(Map configuration) async {
+  var mysqlConfig = configuration['mysql'] as Map? ?? {};
+
+  var connection = await MySQLConnection.createConnection(
+      host: mysqlConfig['host'] as String? ?? 'localhost',
+      port: mysqlConfig['port'] as int? ?? 3306,
+      databaseName: mysqlConfig['database_name'] as String? ??
+          Platform.environment['USER'] ??
+          Platform.environment['USERNAME'] ??
+          '',
+      userName: mysqlConfig['username'] as String? ?? '',
+      password: mysqlConfig['password'] as String? ?? '',
+      secure: mysqlConfig['use_ssl'] as bool? ?? false);
+
+  await connection.connect(timeoutMs: 30000);
+  return connection;
+}

+ 10 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/config/plugins/plugins.dart

@@ -0,0 +1,10 @@
+/// Custom plugins go here.
+import 'dart:async';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'orm.dart' as orm;
+
+Future configureServer(Angel app) async {
+  // Include any plugins you have made here.
+
+  await app.configure(orm.configureServer);
+}

+ 16 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/models/fortune.dart

@@ -0,0 +1,16 @@
+import 'package:angel3_migration/angel3_migration.dart';
+//import 'package:angel3_model/angel3_model.dart';
+import 'package:angel3_serialize/angel3_serialize.dart';
+import 'package:angel3_orm/angel3_orm.dart';
+import 'package:optional/optional.dart';
+
+part 'fortune.g.dart';
+
+@serializable
+@Orm(tableName: 'fortune')
+abstract class _Fortune {
+  int? id;
+
+  @Column(length: 2048)
+  String? message;
+}

+ 212 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/models/fortune.g.dart

@@ -0,0 +1,212 @@
+// GENERATED CODE - DO NOT MODIFY BY HAND
+
+part of 'fortune.dart';
+
+// **************************************************************************
+// MigrationGenerator
+// **************************************************************************
+
+class FortuneMigration extends Migration {
+  @override
+  void up(Schema schema) {
+    schema.create('fortune', (table) {
+      table.integer('id');
+      table.varChar('message', length: 2048);
+    });
+  }
+
+  @override
+  void down(Schema schema) {
+    schema.drop('fortune');
+  }
+}
+
+// **************************************************************************
+// OrmGenerator
+// **************************************************************************
+
+class FortuneQuery extends Query<Fortune, FortuneQueryWhere> {
+  FortuneQuery({Query? parent, Set<String>? trampoline})
+      : super(parent: parent) {
+    trampoline ??= <String>{};
+    trampoline.add(tableName);
+    _where = FortuneQueryWhere(this);
+  }
+
+  @override
+  final FortuneQueryValues values = FortuneQueryValues();
+
+  List<String> _selectedFields = [];
+
+  FortuneQueryWhere? _where;
+
+  @override
+  Map<String, String> get casts {
+    return {};
+  }
+
+  @override
+  String get tableName {
+    return 'fortune';
+  }
+
+  @override
+  List<String> get fields {
+    const _fields = ['id', 'message'];
+    return _selectedFields.isEmpty
+        ? _fields
+        : _fields.where((field) => _selectedFields.contains(field)).toList();
+  }
+
+  FortuneQuery select(List<String> selectedFields) {
+    _selectedFields = selectedFields;
+    return this;
+  }
+
+  @override
+  FortuneQueryWhere? get where {
+    return _where;
+  }
+
+  @override
+  FortuneQueryWhere newWhereClause() {
+    return FortuneQueryWhere(this);
+  }
+
+  Optional<Fortune> parseRow(List row) {
+    if (row.every((x) => x == null)) {
+      return Optional.empty();
+    }
+    var model = Fortune(
+        id: fields.contains('id') ? (row[0] as int?) : null,
+        message: fields.contains('message') ? (row[1] as String?) : null);
+    return Optional.of(model);
+  }
+
+  @override
+  Optional<Fortune> deserialize(List row) {
+    return parseRow(row);
+  }
+}
+
+class FortuneQueryWhere extends QueryWhere {
+  FortuneQueryWhere(FortuneQuery query)
+      : id = NumericSqlExpressionBuilder<int>(query, 'id'),
+        message = StringSqlExpressionBuilder(query, 'message');
+
+  final NumericSqlExpressionBuilder<int> id;
+
+  final StringSqlExpressionBuilder message;
+
+  @override
+  List<SqlExpressionBuilder> get expressionBuilders {
+    return [id, message];
+  }
+}
+
+class FortuneQueryValues extends MapQueryValues {
+  @override
+  Map<String, String> get casts {
+    return {};
+  }
+
+  int? get id {
+    return (values['id'] as int?);
+  }
+
+  set id(int? value) => values['id'] = value;
+  String? get message {
+    return (values['message'] as String?);
+  }
+
+  set message(String? value) => values['message'] = value;
+  void copyFrom(Fortune model) {
+    id = model.id;
+    message = model.message;
+  }
+}
+
+// **************************************************************************
+// JsonModelGenerator
+// **************************************************************************
+
+@generatedSerializable
+class Fortune extends _Fortune {
+  Fortune({this.id, this.message});
+
+  @override
+  int? id;
+
+  @override
+  String? message;
+
+  Fortune copyWith({int? id, String? message}) {
+    return Fortune(id: id ?? this.id, message: message ?? this.message);
+  }
+
+  @override
+  bool operator ==(other) {
+    return other is _Fortune && other.id == id && other.message == message;
+  }
+
+  @override
+  int get hashCode {
+    return hashObjects([id, message]);
+  }
+
+  @override
+  String toString() {
+    return 'Fortune(id=$id, message=$message)';
+  }
+
+  Map<String, dynamic> toJson() {
+    return FortuneSerializer.toMap(this);
+  }
+}
+
+// **************************************************************************
+// SerializerGenerator
+// **************************************************************************
+
+const FortuneSerializer fortuneSerializer = FortuneSerializer();
+
+class FortuneEncoder extends Converter<Fortune, Map> {
+  const FortuneEncoder();
+
+  @override
+  Map convert(Fortune model) => FortuneSerializer.toMap(model);
+}
+
+class FortuneDecoder extends Converter<Map, Fortune> {
+  const FortuneDecoder();
+
+  @override
+  Fortune convert(Map map) => FortuneSerializer.fromMap(map);
+}
+
+class FortuneSerializer extends Codec<Fortune, Map> {
+  const FortuneSerializer();
+
+  @override
+  FortuneEncoder get encoder => const FortuneEncoder();
+  @override
+  FortuneDecoder get decoder => const FortuneDecoder();
+  static Fortune fromMap(Map map) {
+    return Fortune(id: map['id'] as int?, message: map['message'] as String?);
+  }
+
+  static Map<String, dynamic> toMap(_Fortune? model) {
+    if (model == null) {
+      return {};
+    }
+    return {'id': model.id, 'message': model.message};
+  }
+}
+
+abstract class FortuneFields {
+  static const List<String> allFields = <String>[id, message];
+
+  static const String id = 'id';
+
+  static const String message = 'message';
+}

+ 16 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/models/world.dart

@@ -0,0 +1,16 @@
+import 'package:angel3_migration/angel3_migration.dart';
+//import 'package:angel3_model/angel3_model.dart';
+import 'package:angel3_serialize/angel3_serialize.dart';
+import 'package:angel3_orm/angel3_orm.dart';
+import 'package:optional/optional.dart';
+
+part 'world.g.dart';
+
+@serializable
+@Orm(tableName: 'world')
+abstract class _World {
+  int? id;
+
+  @Column()
+  int? randomNumber;
+}

+ 216 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/models/world.g.dart

@@ -0,0 +1,216 @@
+// GENERATED CODE - DO NOT MODIFY BY HAND
+
+part of 'world.dart';
+
+// **************************************************************************
+// MigrationGenerator
+// **************************************************************************
+
+class WorldMigration extends Migration {
+  @override
+  void up(Schema schema) {
+    schema.create('world', (table) {
+      table.integer('id');
+      table.integer('randomNumber');
+    });
+  }
+
+  @override
+  void down(Schema schema) {
+    schema.drop('world');
+  }
+}
+
+// **************************************************************************
+// OrmGenerator
+// **************************************************************************
+
+class WorldQuery extends Query<World, WorldQueryWhere> {
+  WorldQuery({Query? parent, Set<String>? trampoline}) : super(parent: parent) {
+    trampoline ??= <String>{};
+    trampoline.add(tableName);
+    _where = WorldQueryWhere(this);
+  }
+
+  @override
+  final WorldQueryValues values = WorldQueryValues();
+
+  List<String> _selectedFields = [];
+
+  WorldQueryWhere? _where;
+
+  @override
+  Map<String, String> get casts {
+    return {};
+  }
+
+  @override
+  String get tableName {
+    return 'world';
+  }
+
+  @override
+  List<String> get fields {
+    const _fields = ['id', 'randomNumber'];
+    return _selectedFields.isEmpty
+        ? _fields
+        : _fields.where((field) => _selectedFields.contains(field)).toList();
+  }
+
+  WorldQuery select(List<String> selectedFields) {
+    _selectedFields = selectedFields;
+    return this;
+  }
+
+  @override
+  WorldQueryWhere? get where {
+    return _where;
+  }
+
+  @override
+  WorldQueryWhere newWhereClause() {
+    return WorldQueryWhere(this);
+  }
+
+  Optional<World> parseRow(List row) {
+    if (row.every((x) => x == null)) {
+      return Optional.empty();
+    }
+    var model = World(
+        id: fields.contains('id') ? (row[0] as int?) : null,
+        randomNumber:
+            fields.contains('randomNumber') ? (row[1] as int?) : null);
+    return Optional.of(model);
+  }
+
+  @override
+  Optional<World> deserialize(List row) {
+    return parseRow(row);
+  }
+}
+
+class WorldQueryWhere extends QueryWhere {
+  WorldQueryWhere(WorldQuery query)
+      : id = NumericSqlExpressionBuilder<int>(query, 'id'),
+        randomNumber = NumericSqlExpressionBuilder<int>(query, 'randomNumber');
+
+  final NumericSqlExpressionBuilder<int> id;
+
+  final NumericSqlExpressionBuilder<int> randomNumber;
+
+  @override
+  List<SqlExpressionBuilder> get expressionBuilders {
+    return [id, randomNumber];
+  }
+}
+
+class WorldQueryValues extends MapQueryValues {
+  @override
+  Map<String, String> get casts {
+    return {};
+  }
+
+  int? get id {
+    return (values['id'] as int?);
+  }
+
+  set id(int? value) => values['id'] = value;
+  int? get randomNumber {
+    return (values['randomNumber'] as int?);
+  }
+
+  set randomNumber(int? value) => values['randomNumber'] = value;
+  void copyFrom(World model) {
+    id = model.id;
+    randomNumber = model.randomNumber;
+  }
+}
+
+// **************************************************************************
+// JsonModelGenerator
+// **************************************************************************
+
+@generatedSerializable
+class World extends _World {
+  World({this.id, this.randomNumber});
+
+  @override
+  int? id;
+
+  @override
+  int? randomNumber;
+
+  World copyWith({int? id, int? randomNumber}) {
+    return World(
+        id: id ?? this.id, randomNumber: randomNumber ?? this.randomNumber);
+  }
+
+  @override
+  bool operator ==(other) {
+    return other is _World &&
+        other.id == id &&
+        other.randomNumber == randomNumber;
+  }
+
+  @override
+  int get hashCode {
+    return hashObjects([id, randomNumber]);
+  }
+
+  @override
+  String toString() {
+    return 'World(id=$id, randomNumber=$randomNumber)';
+  }
+
+  Map<String, dynamic> toJson() {
+    return WorldSerializer.toMap(this);
+  }
+}
+
+// **************************************************************************
+// SerializerGenerator
+// **************************************************************************
+
+const WorldSerializer worldSerializer = WorldSerializer();
+
+class WorldEncoder extends Converter<World, Map> {
+  const WorldEncoder();
+
+  @override
+  Map convert(World model) => WorldSerializer.toMap(model);
+}
+
+class WorldDecoder extends Converter<Map, World> {
+  const WorldDecoder();
+
+  @override
+  World convert(Map map) => WorldSerializer.fromMap(map);
+}
+
+class WorldSerializer extends Codec<World, Map> {
+  const WorldSerializer();
+
+  @override
+  WorldEncoder get encoder => const WorldEncoder();
+  @override
+  WorldDecoder get decoder => const WorldDecoder();
+  static World fromMap(Map map) {
+    return World(
+        id: map['id'] as int?, randomNumber: map['randomNumber'] as int?);
+  }
+
+  static Map<String, dynamic> toMap(_World? model) {
+    if (model == null) {
+      return {};
+    }
+    return {'id': model.id, 'randomNumber': model.randomNumber};
+  }
+}
+
+abstract class WorldFields {
+  static const List<String> allFields = <String>[id, randomNumber];
+
+  static const String id = 'id';
+
+  static const String randomNumber = 'randomNumber';
+}

+ 131 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/routes/controllers/controllers.dart

@@ -0,0 +1,131 @@
+import 'dart:async';
+import 'dart:convert';
+import 'dart:math';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_orm/angel3_orm.dart';
+import '../../models/fortune.dart';
+import '../../models/world.dart';
+
+Future configureServer(Angel app) async {
+  /// Controllers will not function unless wired to the application!
+
+  var executor = app.container.make<QueryExecutor>();
+
+  // Generate a random number between 1 and 10000
+  int _genRandomId() {
+    var rand = Random();
+    return rand.nextInt(10000) + 1;
+  }
+
+  int _parseQueryCount(String? count) {
+    if (count == null) {
+      return 1;
+    }
+
+    var limit = int.tryParse(count) ?? 0;
+    if (limit < 1) return 1;
+
+    if (limit > 500) return 500;
+
+    return limit;
+  }
+
+  List<int> _generateIds(int maxCount) {
+    var result = <int>[];
+
+    while (result.length < maxCount) {
+      var id = _genRandomId();
+      if (!result.contains(id)) {
+        result.add(id);
+      }
+    }
+
+    return result;
+  }
+
+  // Return data in json
+  app.get('/json', (req, res) => res.json({'message': 'Hello, World!'}));
+
+  const reply = "Hello, World!";
+
+  // Return data in plaintext
+  app.get('/plaintext', (req, res) async {
+    res.contentLength = reply.length;
+    res.write(reply);
+  });
+
+  // Add an entry and sort a list of fortune
+  app.get('/fortunes', (req, res) async {
+    //var stopwatch = Stopwatch()..start();
+
+    var list = await FortuneQuery().get(executor);
+
+    //print('Query Time: ${stopwatch.elapsed.inMilliseconds}ms');
+
+    list.add(
+        Fortune(id: 0, message: 'Additional fortune added at request time.'));
+    list.sort((a, b) => a.message?.compareTo(b.message ?? '') ?? 0);
+
+    //print('Process Time: ${stopwatch.elapsed.inMilliseconds}ms');
+    //stopwatch.stop();
+
+    res.render('listing', {'fortunes': list});
+  });
+
+  // Find a random World
+  app.get('/db', (req, res) async {
+    var id = _genRandomId();
+    var query = WorldQuery()..where?.id.equals(id);
+    var result = await query.get(executor);
+    if (result.isNotEmpty) {
+      res.json(result[0]);
+    } else {
+      res.json({});
+    }
+  });
+
+  // Return a list of worlds
+  app.get('/query', (req, res) async {
+    var params = req.queryParameters;
+
+    var queryLimit = _parseQueryCount(params['queries'] as String?);
+
+    var list = _generateIds(queryLimit);
+    var query = WorldQuery();
+    var result = <World>[];
+    for (var id in list) {
+      query.where?.id.equals(id);
+      var optWorld = await query.getOne(executor);
+      result.add(optWorld.value);
+    }
+
+    res.json(result);
+  });
+
+  // Update a list of worlds
+  app.get('/updates', (req, res) async {
+    //var stopwatch = Stopwatch()..start();
+
+    var params = req.queryParameters;
+    var queryLimit = _parseQueryCount(params['queries'] as String?);
+    var listOfIds = _generateIds(queryLimit);
+
+    var query = WorldQuery();
+    var result = <World>[];
+    for (var id in listOfIds) {
+      query.where?.id.equals(id);
+      var optWorld = await query.getOne(executor);
+
+      query
+        ..where?.id.equals(optWorld.value.id!)
+        ..values.randomNumber = _genRandomId();
+      var updatedRec = await query.updateOne(executor);
+      result.add(updatedRec.value);
+    }
+
+    //print('Process Time: ${stopwatch.elapsed.inMilliseconds}ms');
+    //stopwatch.stop();
+
+    res.json(result);
+  });
+}

+ 62 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/routes/routes.dart

@@ -0,0 +1,62 @@
+/// This app's route configuration.
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_static/angel3_static.dart';
+import 'package:file/file.dart';
+import 'controllers/controllers.dart' as controllers;
+
+/// Put your app routes here!
+///
+/// See the wiki for information about routing, requests, and responses:
+/// * https://angel3-docs.dukefirehawk.com/guides/basic-routing
+/// * https://angel3-docs.dukefirehawk.com/guides/requests-and-responses
+AngelConfigurer configureServer(FileSystem fileSystem) {
+  return (Angel app) async {
+    // Typically, you want to mount controllers first, after any global middleware.
+    await app.configure(controllers.configureServer);
+
+    // Render `views/hello.jl` when a user visits the application root.
+    app.get('/', (req, res) => res.render('hello'));
+
+    // Mount static server at web in development.
+    // The `CachingVirtualDirectory` variant of `VirtualDirectory` also sends `Cache-Control` headers.
+    //
+    // In production, however, prefer serving static files through NGINX or a
+    // similar reverse proxy.
+    //
+    // Read the following two sources for documentation:
+    // * https://medium.com/the-angel-framework/serving-static-files-with-the-angel-framework-2ddc7a2b84ae
+    // * https://pub.dev/packages/angel3_static
+    if (!app.environment.isProduction) {
+      var vDir = VirtualDirectory(
+        app,
+        fileSystem,
+        source: fileSystem.directory('web'),
+      );
+      app.fallback(vDir.handleRequest);
+    }
+
+    // Throw a 404 if no route matched the request.
+    app.fallback((req, res) => throw AngelHttpException.notFound());
+
+    // Set our application up to handle different errors.
+    //
+    // Read the following for documentation:
+    // * https://angel3-docs.dukefirehawk.com/guides/error-handling
+
+    var oldErrorHandler = app.errorHandler;
+    app.errorHandler = (e, req, res) async {
+      if (req.accepts('text/html', strict: true)) {
+        if (e.statusCode == 404 && req.accepts('text/html', strict: true)) {
+          await res
+              .render('error', {'message': 'No file exists at ${req.uri}.'});
+        } else {
+          await res.render('error', {'message': e.message});
+        }
+      } else {
+        return await oldErrorHandler(e, req, res);
+      }
+    };
+
+    app.enableCache();
+  };
+}

+ 13 - 0
frameworks/Dart/angel3/orm-mysql/lib/src/services/services.dart

@@ -0,0 +1,13 @@
+/// Declare services here!
+import 'dart:async';
+import 'package:angel3_framework/angel3_framework.dart';
+
+/// Configure our application to use *services*.
+/// Services must be wired to the app via `app.use`.
+///
+/// They provide many benefits, such as instant REST API generation,
+/// and respond to both REST and WebSockets.
+///
+/// Read more here:
+/// https://github.com/angel-dart/angel/wiki/Service-Basics
+Future configureServer(Angel app) async {}

+ 845 - 0
frameworks/Dart/angel3/orm-mysql/pubspec.lock

@@ -0,0 +1,845 @@
+# Generated by pub
+# See https://dart.dev/tools/pub/glossary#lockfile
+packages:
+  _fe_analyzer_shared:
+    dependency: transitive
+    description:
+      name: _fe_analyzer_shared
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "47.0.0"
+  analyzer:
+    dependency: transitive
+    description:
+      name: analyzer
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.7.0"
+  angel3_auth:
+    dependency: "direct main"
+    description:
+      name: angel3_auth
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_client:
+    dependency: transitive
+    description:
+      name: angel3_client
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_configuration:
+    dependency: "direct main"
+    description:
+      name: angel3_configuration
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_container:
+    dependency: transitive
+    description:
+      name: angel3_container
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_framework:
+    dependency: "direct main"
+    description:
+      name: angel3_framework
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_hot:
+    dependency: "direct dev"
+    description:
+      name: angel3_hot
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_http_exception:
+    dependency: transitive
+    description:
+      name: angel3_http_exception
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_jael:
+    dependency: "direct main"
+    description:
+      name: angel3_jael
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_migration:
+    dependency: "direct main"
+    description:
+      name: angel3_migration
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_migration_runner:
+    dependency: "direct dev"
+    description:
+      name: angel3_migration_runner
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_mock_request:
+    dependency: transitive
+    description:
+      name: angel3_mock_request
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_model:
+    dependency: transitive
+    description:
+      name: angel3_model
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_orm:
+    dependency: "direct main"
+    description:
+      name: angel3_orm
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.1"
+  angel3_orm_generator:
+    dependency: "direct dev"
+    description:
+      name: angel3_orm_generator
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_orm_mysql:
+    dependency: "direct main"
+    description:
+      name: angel3_orm_mysql
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.1"
+  angel3_production:
+    dependency: "direct main"
+    description:
+      name: angel3_production
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_route:
+    dependency: transitive
+    description:
+      name: angel3_route
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_serialize:
+    dependency: "direct main"
+    description:
+      name: angel3_serialize
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_serialize_generator:
+    dependency: "direct dev"
+    description:
+      name: angel3_serialize_generator
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_static:
+    dependency: "direct main"
+    description:
+      name: angel3_static
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_test:
+    dependency: "direct dev"
+    description:
+      name: angel3_test
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_validate:
+    dependency: "direct main"
+    description:
+      name: angel3_validate
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  angel3_websocket:
+    dependency: transitive
+    description:
+      name: angel3_websocket
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  args:
+    dependency: transitive
+    description:
+      name: args
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.3.1"
+  async:
+    dependency: transitive
+    description:
+      name: async
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.9.0"
+  belatuk_code_buffer:
+    dependency: transitive
+    description:
+      name: belatuk_code_buffer
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  belatuk_combinator:
+    dependency: transitive
+    description:
+      name: belatuk_combinator
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  belatuk_html_builder:
+    dependency: transitive
+    description:
+      name: belatuk_html_builder
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  belatuk_http_server:
+    dependency: transitive
+    description:
+      name: belatuk_http_server
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.0"
+  belatuk_json_serializer:
+    dependency: transitive
+    description:
+      name: belatuk_json_serializer
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "6.0.1"
+  belatuk_merge_map:
+    dependency: transitive
+    description:
+      name: belatuk_merge_map
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  belatuk_pretty_logging:
+    dependency: "direct main"
+    description:
+      name: belatuk_pretty_logging
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "5.0.0"
+  belatuk_pub_sub:
+    dependency: transitive
+    description:
+      name: belatuk_pub_sub
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "5.0.0"
+  belatuk_range_header:
+    dependency: transitive
+    description:
+      name: belatuk_range_header
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "5.0.0"
+  belatuk_symbol_table:
+    dependency: transitive
+    description:
+      name: belatuk_symbol_table
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  boolean_selector:
+    dependency: transitive
+    description:
+      name: boolean_selector
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  buffer:
+    dependency: transitive
+    description:
+      name: buffer
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.1.1"
+  build:
+    dependency: transitive
+    description:
+      name: build
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.3.1"
+  build_config:
+    dependency: transitive
+    description:
+      name: build_config
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.1.0"
+  build_daemon:
+    dependency: transitive
+    description:
+      name: build_daemon
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.1.0"
+  build_resolvers:
+    dependency: transitive
+    description:
+      name: build_resolvers
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.10"
+  build_runner:
+    dependency: "direct dev"
+    description:
+      name: build_runner
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.2.1"
+  build_runner_core:
+    dependency: transitive
+    description:
+      name: build_runner_core
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.2.4"
+  built_collection:
+    dependency: transitive
+    description:
+      name: built_collection
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "5.1.1"
+  built_value:
+    dependency: transitive
+    description:
+      name: built_value
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "8.4.1"
+  charcode:
+    dependency: transitive
+    description:
+      name: charcode
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.3.1"
+  checked_yaml:
+    dependency: transitive
+    description:
+      name: checked_yaml
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.1"
+  clock:
+    dependency: transitive
+    description:
+      name: clock
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.1.1"
+  code_builder:
+    dependency: transitive
+    description:
+      name: code_builder
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.3.0"
+  collection:
+    dependency: transitive
+    description:
+      name: collection
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.16.0"
+  convert:
+    dependency: transitive
+    description:
+      name: convert
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.2"
+  coverage:
+    dependency: transitive
+    description:
+      name: coverage
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.6.0"
+  crypto:
+    dependency: transitive
+    description:
+      name: crypto
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.2"
+  dart_style:
+    dependency: transitive
+    description:
+      name: dart_style
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.2.4"
+  dotenv:
+    dependency: transitive
+    description:
+      name: dotenv
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.1"
+  file:
+    dependency: transitive
+    description:
+      name: file
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "6.1.4"
+  fixnum:
+    dependency: transitive
+    description:
+      name: fixnum
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.1"
+  frontend_server_client:
+    dependency: transitive
+    description:
+      name: frontend_server_client
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.3"
+  glob:
+    dependency: transitive
+    description:
+      name: glob
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  graphs:
+    dependency: transitive
+    description:
+      name: graphs
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  http:
+    dependency: transitive
+    description:
+      name: http
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.13.5"
+  http2:
+    dependency: transitive
+    description:
+      name: http2
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.0"
+  http_multi_server:
+    dependency: transitive
+    description:
+      name: http_multi_server
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.2.1"
+  http_parser:
+    dependency: transitive
+    description:
+      name: http_parser
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.1"
+  inflection3:
+    dependency: transitive
+    description:
+      name: inflection3
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.5.3+2"
+  intl:
+    dependency: transitive
+    description:
+      name: intl
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.17.0"
+  io:
+    dependency: "direct dev"
+    description:
+      name: io
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.3"
+  jael3:
+    dependency: "direct main"
+    description:
+      name: jael3
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  jael3_preprocessor:
+    dependency: transitive
+    description:
+      name: jael3_preprocessor
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "7.0.0"
+  js:
+    dependency: transitive
+    description:
+      name: js
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.6.4"
+  json_annotation:
+    dependency: transitive
+    description:
+      name: json_annotation
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.6.0"
+  json_rpc_2:
+    dependency: transitive
+    description:
+      name: json_rpc_2
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.2"
+  lints:
+    dependency: "direct dev"
+    description:
+      name: lints
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.0"
+  logging:
+    dependency: "direct main"
+    description:
+      name: logging
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.2"
+  matcher:
+    dependency: transitive
+    description:
+      name: matcher
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.12.12"
+  meta:
+    dependency: transitive
+    description:
+      name: meta
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.8.0"
+  mime:
+    dependency: transitive
+    description:
+      name: mime
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.2"
+  mysql1:
+    dependency: transitive
+    description:
+      name: mysql1
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.20.0"
+  mysql_client:
+    dependency: transitive
+    description:
+      name: mysql_client
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.0.26"
+  node_preamble:
+    dependency: transitive
+    description:
+      name: node_preamble
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.1"
+  optional:
+    dependency: "direct main"
+    description:
+      name: optional
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "6.1.0+1"
+  package_config:
+    dependency: transitive
+    description:
+      name: package_config
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  path:
+    dependency: transitive
+    description:
+      name: path
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.8.2"
+  pedantic:
+    dependency: transitive
+    description:
+      name: pedantic
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.11.1"
+  pool:
+    dependency: transitive
+    description:
+      name: pool
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.5.1"
+  postgres:
+    dependency: transitive
+    description:
+      name: postgres
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.5.2"
+  pub_semver:
+    dependency: transitive
+    description:
+      name: pub_semver
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.1"
+  pubspec_parse:
+    dependency: transitive
+    description:
+      name: pubspec_parse
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.2.1"
+  quiver:
+    dependency: transitive
+    description:
+      name: quiver
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.1.0"
+  recase:
+    dependency: transitive
+    description:
+      name: recase
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "4.0.0"
+  sasl_scram:
+    dependency: transitive
+    description:
+      name: sasl_scram
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.1.1"
+  saslprep:
+    dependency: transitive
+    description:
+      name: saslprep
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.2"
+  shelf:
+    dependency: transitive
+    description:
+      name: shelf
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.3.2"
+  shelf_packages_handler:
+    dependency: transitive
+    description:
+      name: shelf_packages_handler
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.1"
+  shelf_static:
+    dependency: transitive
+    description:
+      name: shelf_static
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.1.1"
+  shelf_web_socket:
+    dependency: transitive
+    description:
+      name: shelf_web_socket
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.2"
+  source_gen:
+    dependency: transitive
+    description:
+      name: source_gen
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.2.3"
+  source_map_stack_trace:
+    dependency: transitive
+    description:
+      name: source_map_stack_trace
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  source_maps:
+    dependency: transitive
+    description:
+      name: source_maps
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.10.10"
+  source_span:
+    dependency: transitive
+    description:
+      name: source_span
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.9.1"
+  stack_trace:
+    dependency: transitive
+    description:
+      name: stack_trace
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.10.0"
+  stream_channel:
+    dependency: transitive
+    description:
+      name: stream_channel
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.1.0"
+  stream_transform:
+    dependency: transitive
+    description:
+      name: stream_transform
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.0"
+  string_scanner:
+    dependency: transitive
+    description:
+      name: string_scanner
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.1.1"
+  term_glyph:
+    dependency: transitive
+    description:
+      name: term_glyph
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.2.1"
+  test:
+    dependency: "direct dev"
+    description:
+      name: test
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.21.6"
+  test_api:
+    dependency: transitive
+    description:
+      name: test_api
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.4.14"
+  test_core:
+    dependency: transitive
+    description:
+      name: test_core
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.4.18"
+  timing:
+    dependency: transitive
+    description:
+      name: timing
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.0"
+  tuple:
+    dependency: transitive
+    description:
+      name: tuple
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.0.0"
+  typed_data:
+    dependency: transitive
+    description:
+      name: typed_data
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.3.1"
+  unorm_dart:
+    dependency: transitive
+    description:
+      name: unorm_dart
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "0.2.0"
+  uuid:
+    dependency: transitive
+    description:
+      name: uuid
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.0.6"
+  vm_service:
+    dependency: transitive
+    description:
+      name: vm_service
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "9.4.0"
+  watcher:
+    dependency: transitive
+    description:
+      name: watcher
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.0.1"
+  web_socket_channel:
+    dependency: transitive
+    description:
+      name: web_socket_channel
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "2.2.0"
+  webkit_inspection_protocol:
+    dependency: transitive
+    description:
+      name: webkit_inspection_protocol
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "1.2.0"
+  yaml:
+    dependency: transitive
+    description:
+      name: yaml
+      url: "https://pub.dartlang.org"
+    source: hosted
+    version: "3.1.1"
+sdks:
+  dart: ">=2.18.0 <3.0.0"

+ 40 - 0
frameworks/Dart/angel3/orm-mysql/pubspec.yaml

@@ -0,0 +1,40 @@
+name: orm_mysql_app
+version: 1.0.0
+description: A basic starter application template for the Angel3 framework
+publish_to: none
+environment:
+  sdk: '>=2.17.0 <3.0.0'
+dependencies:
+  angel3_auth: ^7.0.0
+  angel3_configuration: ^7.0.0
+  angel3_framework: ^7.0.0
+  angel3_jael: ^7.0.0
+  angel3_migration: ^7.0.0
+  angel3_orm: ^7.0.0
+  angel3_orm_mysql: ^7.0.0
+  angel3_serialize: ^7.0.0
+  angel3_production: ^7.0.0
+  angel3_static: ^7.0.0
+  angel3_validate: ^7.0.0
+  jael3: ^7.0.0
+  belatuk_pretty_logging: ^5.0.0
+  optional: ^6.0.0
+  logging: ^1.0.0
+dev_dependencies:
+  angel3_hot: ^7.0.0
+  angel3_migration_runner: ^7.0.0
+  angel3_orm_generator: ^7.0.0
+  angel3_serialize_generator: ^7.0.0
+  angel3_test: ^7.0.0
+  build_runner: ^2.0.3
+  io: ^1.0.0
+  test: ^1.17.5
+  lints: ^2.0.0
+# dependency_overrides:
+#  angel3_orm:
+#    path: ../../../../../belatuk/packages/orm/angel_orm
+#  angel3_orm_postgres:
+#    path: ../../../../../belatuk/packages/orm/angel_orm_postgres
+#  angel3_jael:
+#    path: ../../../../../belatuk/packages/jael/angel_jael
+     

+ 28 - 0
frameworks/Dart/angel3/orm-mysql/run/dev.dart

@@ -0,0 +1,28 @@
+import 'dart:io';
+import 'package:logging/logging.dart';
+import 'package:belatuk_pretty_logging/belatuk_pretty_logging.dart';
+import 'package:angel3_container/mirrors.dart';
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_hot/angel3_hot.dart';
+import 'package:orm_mysql_app/orm_mysql_app.dart';
+
+void main() async {
+  // Watch the config/ and lib/ directories for changes, and hot-reload the server.
+  hierarchicalLoggingEnabled = true;
+
+  var hot = HotReloader(() async {
+    var logger = Logger.detached('Angel3')
+      ..level = Level.ALL
+      ..onRecord.listen(prettyLog);
+    var app = Angel(logger: logger, reflector: MirrorsReflector());
+    await app.configure(configureServer);
+    return app;
+  }, [
+    Directory('config'),
+    Directory('lib'),
+  ]);
+
+  var server = await hot.startServer('127.0.0.1', 8080);
+  print(
+      'Angel3 server listening at http://${server.address.address}:${server.port}');
+}

+ 29 - 0
frameworks/Dart/angel3/orm-mysql/run/prod.dart

@@ -0,0 +1,29 @@
+import 'package:angel3_container/mirrors.dart';
+import 'package:angel3_production/angel3_production.dart';
+import 'package:orm_mysql_app/orm_mysql_app.dart';
+
+// NOTE: By default, the Runner class does not use the `MirrorsReflector`, or any
+// other reflector.
+//
+// If your application uses any functionality that relies on annotations or reflection,
+// either include the MirrorsReflector, or use a static reflector variant.
+//
+// The following use cases require reflection:
+// * Use of Controllers, via @Expose() or @ExposeWS()
+// * Use of dependency injection into constructors, whether in controllers or plain `container.make` calls
+// * Use of the `ioc` function in any route
+//
+// The `MirrorsReflector` from `package:angel_container/mirrors.dart` is by far the most convenient pattern,
+// so use it if possible.
+//
+// However, the following alternatives exist:
+// * Generation via `package:angel_container_generator`
+// * Creating an instance of `StaticReflector`
+// * Manually implementing the `Reflector` interface (cumbersome; not recommended)
+//
+// As of January 4th, 2018, the documentation has not yet been updated to state this,
+// so in the meantime, visit the Angel chat for further questions:
+//
+// https://gitter.im/angel_dart/discussion
+void main(List<String> args) =>
+    Runner('Angel3', configureServer, reflector: MirrorsReflector()).run(args);
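
The comments above cover reflector choices for the Runner-based entrypoint. For contrast, a single-isolate entrypoint without `Runner` or hot reload can be written directly against `AngelHttp`; the sketch below is not part of this PR, it assumes `AngelHttp` is available from `package:angel3_framework/http.dart`, and the host and port are illustrative.

```dart
// Minimal sketch (not part of this PR) of a plain entrypoint without Runner
// or hot reload; assumes AngelHttp from package:angel3_framework/http.dart.
// Host and port are illustrative, not taken from the benchmark config.
import 'package:angel3_container/mirrors.dart';
import 'package:angel3_framework/angel3_framework.dart';
import 'package:angel3_framework/http.dart';
import 'package:orm_mysql_app/orm_mysql_app.dart';

Future<void> main() async {
  var app = Angel(reflector: MirrorsReflector());
  await app.configure(configureServer);

  var http = AngelHttp(app);
  var server = await http.startServer('127.0.0.1', 8080);
  print('Listening at http://${server.address.address}:${server.port}');
}
```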

+ 20 - 0
frameworks/Dart/angel3/orm-mysql/templates/fortunes.mustache

@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Fortunes</title>
+</head>
+<body>
+<table>
+  <tr>
+    <th>id</th>
+    <th>message</th>
+  </tr>
+  {{#fortunes}}
+  <tr>
+    <td>{{id}}</td>
+    <td>{{message}}</td>
+  </tr>
+  {{/fortunes}}
+</table>
+</body>
+</html>
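
The `{{#fortunes}}` section iterates over a list and renders an `id` and `message` cell per row. As a hedged sketch, the view model this template expects would look roughly like the map below; the key names come from the template itself, while the values are purely illustrative.

```dart
// Illustrative view-model shape for the fortunes.mustache template above.
// Keys ('fortunes', 'id', 'message') mirror the template; values are made up.
final viewModel = <String, dynamic>{
  'fortunes': [
    {'id': 1, 'message': 'A sample fortune'},
    {'id': 2, 'message': 'Another sample fortune'},
  ],
};
```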

+ 43 - 0
frameworks/Dart/angel3/orm-mysql/test/all_test.dart

@@ -0,0 +1,43 @@
+import 'package:angel3_framework/angel3_framework.dart';
+import 'package:angel3_test/angel3_test.dart';
+import 'package:test/test.dart';
+import 'package:orm_mysql_app/orm_mysql_app.dart';
+
+// Angel also includes facilities to make testing easier.
+//
+// `package:angel_test` ships a client that can test
+// both plain HTTP and WebSockets.
+//
+// Tests do not require your server to actually be mounted on a port,
+// so they will run faster than they would in other frameworks, where you
+// would have to first bind a socket, and then account for network latency.
+//
+// See the documentation here:
+// https://github.com/angel-dart/test
+//
+// If you are unfamiliar with Dart's advanced testing library, you can read up
+// here:
+// https://github.com/dart-lang/test
+
+void main() async {
+  late TestClient client;
+
+  setUp(() async {
+    var app = Angel();
+    await app.configure(configureServer);
+
+    client = await connectTo(app);
+  });
+
+  tearDown(() async {
+    await client.close();
+  });
+
+  test('index returns 200', () async {
+    // Request a resource at the given path.
+    var response = await client.get(Uri.parse('/'));
+
+    // Expect a 200 response.
+    expect(response, hasStatus(200));
+  });
+}
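
Further routes can be exercised with the same `TestClient` pattern. The sketch below is hypothetical: it reuses the `client`, `Uri.parse`, and `hasStatus` helpers already shown in this file, but the `/json` path is an assumption about the benchmark's routes and is not confirmed by this diff.

```dart
// Hypothetical additional case inside the same main(): the /json route
// name is assumed, not taken from this PR.
test('json endpoint returns 200', () async {
  var response = await client.get(Uri.parse('/json'));
  expect(response, hasStatus(200));
});
```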

Some files were not shown because too many files have changed.