Browse Source

Dockerify (#3292)

* First commit

* Exploring docker

* Fixed the mysql issue

* Start of dockerification

* Rustified the TFBReaper

* TFBReaper is now in the base

* Separating parts

* Fixed the java issue

* POC suite hookup

* tee'd output

* Minor fixes

* Vagrant works again

* Started work on --init

* Sets up client
* Starts the setup of the database machines

* Database dockerificationating

* Updated to Ubuntu16.04 and fixed a bug

* Fixed a bug and more dockerication

* Make vagrant work

* Be nicer to travis
Mike Smith 7 years ago
parent
commit
8aaf172cee
33 changed files with 1663 additions and 633 deletions
  1. 201 201
      .travis.yml
  2. 3 0
      deployment/vagrant/bootstrap.sh
  3. 3 3
      frameworks/Java/gemini/Docroot/WEB-INF/gemini-mysql.conf
  4. 531 0
      frameworks/Java/gemini/Docroot/WEB-INF/gemini.conf
  5. 2 2
      frameworks/Java/gemini/Docroot/WEB-INF/resin.xml
  6. 0 9
      frameworks/Java/gemini/Docroot/WEB-INF/web.xml
  7. 71 69
      frameworks/Java/gemini/benchmark_config.json
  8. 18 0
      frameworks/Java/gemini/gemini-mysql.dockerfile
  9. 18 0
      frameworks/Java/gemini/gemini.dockerfile
  10. 0 12
      frameworks/Java/gemini/start.sh
  11. 0 12
      frameworks/Java/gemini/start_postgresql.sh
  12. 68 49
      toolset/benchmark/benchmarker.py
  13. 109 82
      toolset/benchmark/framework_test.py
  14. 3 3
      toolset/benchmark/test_types/framework_test_type.py
  15. 25 1
      toolset/benchmark/utils.py
  16. 144 0
      toolset/initializer.py
  17. 18 11
      toolset/run-tests.py
  18. 1 1
      toolset/scaffolding.py
  19. 0 155
      toolset/setup/linux/TFBReaper.c
  20. 3 3
      toolset/setup/linux/client.sh
  21. 36 0
      toolset/setup/linux/database.sh
  22. 2 0
      toolset/setup/linux/databases/mysql/create.sql
  23. 12 0
      toolset/setup/linux/docker/TFBReaper/Cargo.toml
  24. 118 0
      toolset/setup/linux/docker/TFBReaper/src/main.rs
  25. 63 0
      toolset/setup/linux/docker/databases/mysql/create.sql
  26. 76 0
      toolset/setup/linux/docker/databases/mysql/my.cnf
  27. 41 0
      toolset/setup/linux/docker/databases/mysql/mysql.dockerfile
  28. 7 0
      toolset/setup/linux/docker/databases/mysql/mysql.list
  29. 12 0
      toolset/setup/linux/docker/java.dockerfile
  30. 7 0
      toolset/setup/linux/docker/resin.dockerfile
  31. 15 0
      toolset/setup/linux/docker/tfb.dockerfile
  32. 54 20
      toolset/setup/linux/prerequisites.sh
  33. 2 0
      toolset/travis/travis_setup.sh

+ 201 - 201
.travis.yml

@@ -11,207 +11,207 @@ python:
 
 
 env:
 env:
   matrix:
   matrix:
-    - "TESTDIR=C/duda"
-    - "TESTDIR=C/facil.io"
-    - "TESTDIR=C/onion"
-    - "TESTDIR=C/h2o"
-    - "TESTDIR=C/octane"
-    - "TESTDIR=C/libreactor"
-    - "TESTDIR=CSharp/aspnet"
-    - "TESTDIR=CSharp/aspnetcore"
-    ## - "TESTDIR=CSharp/aspnet-stripped"
-    - "TESTDIR=CSharp/evhttp-sharp"
-    ## - "TESTDIR=CSharp/HttpListener"
-    - "TESTDIR=CSharp/nancy"
-    - "TESTDIR=CSharp/revenj"
-    - "TESTDIR=CSharp/servicestack"
-    - "TESTDIR=C++/cppcms"
-    - "TESTDIR=C++/ffead-cpp"
-    - "TESTDIR=C++/cpoll_cppsp"
-    - "TESTDIR=C++/cutelyst"
-    - "TESTDIR=C++/silicon"
-    - "TESTDIR=C++/treefrog"
-    - "TESTDIR=C++/ulib"
-    - "TESTDIR=C++/wt"
-    - "TESTDIR=C++/poco"
-    - "TESTDIR=C++/luna"
-    - "TESTDIR=Clojure/compojure"
-    - "TESTDIR=Clojure/http-kit"
-    - "TESTDIR=Clojure/luminus"
-    - "TESTDIR=Clojure/macchiato"
-    - "TESTDIR=Clojure/pedestal"
-    - "TESTDIR=Clojure/aleph"
-    - "TESTDIR=Clojure/reitit"
-    - "TESTDIR=Crystal/amber"
-    - "TESTDIR=Crystal/crystal"
-    - "TESTDIR=Crystal/kemal"
-    - "TESTDIR=D/vibed"
-    - "TESTDIR=D/hunt"
-    - "TESTDIR=D/collie"
-    - "TESTDIR=Dart/dart-raw"
-    - "TESTDIR=Dart/redstone"
-    - "TESTDIR=Dart/start"
-    - "TESTDIR=Dart/stream"
-    - "TESTDIR=Elixir/phoenix"
-    - "TESTDIR=Elixir/cowboy"
-    - "TESTDIR=Erlang/chicagoboss"
-    - "TESTDIR=Erlang/cowboy"
-    - "TESTDIR=Erlang/elli"
-    - "TESTDIR=Erlang/mochiweb"
-    - "TESTDIR=Go/aah"
-    - "TESTDIR=Go/beego"
-    - "TESTDIR=Go/echo"
-    - "TESTDIR=Go/falcore"
-    - "TESTDIR=Go/fasthttp"
-    - "TESTDIR=Go/gin"
-    - "TESTDIR=Go/goji"
-    - "TESTDIR=Go/go-std"
-    - "TESTDIR=Go/revel"
-    - "TESTDIR=Go/webgo"
-    - "TESTDIR=Groovy/grails"
-    - "TESTDIR=Groovy/hot"
-    - "TESTDIR=Haskell/snap"
-    - "TESTDIR=Haskell/wai"
-    - "TESTDIR=Haskell/yesod"
-    - "TESTDIR=Haskell/servant"
-    - "TESTDIR=Haskell/spock"
-    - "TESTDIR=Java/act"
-    - "TESTDIR=Java/activeweb"
-    - "TESTDIR=Java/baratine"
-    - "TESTDIR=Java/bayou"
-    - "TESTDIR=Java/blade"
-    - "TESTDIR=Java/comsat-servlet"
-    - "TESTDIR=Java/comsat-webactors"
-    - "TESTDIR=Java/curacao"
-    - "TESTDIR=Java/dropwizard"
-    - "TESTDIR=Java/gemini"
-    - "TESTDIR=Java/grizzly-bm"
-    - "TESTDIR=Java/grizzly-jersey"
-    - "TESTDIR=Java/jawn"
-    - "TESTDIR=Java/jetty"
-    - "TESTDIR=Java/jlhttp"
-    - "TESTDIR=Java/jooby"
-    - "TESTDIR=Java/light-java"
-    - "TESTDIR=Java/minijax"
-    - "TESTDIR=Java/netty"
-    - "TESTDIR=Java/ninja-standalone"
-    - "TESTDIR=Java/play1"
-    - "TESTDIR=Java/play2-java"
-    - "TESTDIR=Java/proteus"
-    - "TESTDIR=Java/rapidoid"
-    - "TESTDIR=Java/restexpress"
-    - "TESTDIR=Java/revenj-jvm"
-    - "TESTDIR=Java/servlet"
-    - "TESTDIR=Java/spark"
-    - "TESTDIR=Java/spring"
-    - "TESTDIR=Java/tapestry"
-    - "TESTDIR=Java/undertow"
-    - "TESTDIR=Java/undertow-jersey-c3p0"
-    - "TESTDIR=Java/undertow-jersey-hikaricp"
-    - "TESTDIR=Java/vertx"
-    - "TESTDIR=Java/vertx-web"
-    - "TESTDIR=Java/wicket"
-    - "TESTDIR=Java/wildfly-ee7"
-    - "TESTDIR=JavaScript/express"
-    - "TESTDIR=JavaScript/hapi"
-    - "TESTDIR=JavaScript/koa"
-    - "TESTDIR=JavaScript/nodejs"
-    - "TESTDIR=JavaScript/ringojs"
-    - "TESTDIR=JavaScript/sailsjs"
-    - "TESTDIR=Kotlin/hexagon"
-    - "TESTDIR=Kotlin/ktor"
-    - "TESTDIR=Kotlin/pronghorn"
-    - "TESTDIR=Lua/lapis"
-    - "TESTDIR=Lua/octopus"
-    - "TESTDIR=Lua/openresty"
-    - "TESTDIR=Nim/jester"
-    - "TESTDIR=Perl/dancer"
-    - "TESTDIR=Perl/kelp"
-    - "TESTDIR=Perl/mojolicious"
-    - "TESTDIR=Perl/plack"
-    - "TESTDIR=Perl/web-simple"
-    - "TESTDIR=PHP/cakephp"
-    - "TESTDIR=PHP/hhvm"
-    - "TESTDIR=PHP/php"
-    - "TESTDIR=PHP/cygnite"
-    - "TESTDIR=PHP/codeigniter"
-    - "TESTDIR=PHP/clancats"
-    - "TESTDIR=PHP/fat-free"
-    - "TESTDIR=PHP/fuel"
-    - "TESTDIR=PHP/kohana"
-    - "TESTDIR=PHP/laravel"
-    - "TESTDIR=PHP/limonade"
-    - "TESTDIR=PHP/lithium"
-    - "TESTDIR=PHP/lumen"
-    - "TESTDIR=PHP/peachpie"
-    - "TESTDIR=PHP/phalcon"
-    - "TESTDIR=PHP/phalcon-micro"
-    - "TESTDIR=PHP/phpixie"
-    - "TESTDIR=PHP/silex"
-    - "TESTDIR=PHP/silex-orm"
-    - "TESTDIR=PHP/slim"
-    - "TESTDIR=PHP/symfony"
-    - "TESTDIR=PHP/workerman"
-    - "TESTDIR=PHP/yaf"
-    - "TESTDIR=PHP/yii2"
-    - "TESTDIR=PHP/zend"
-    - "TESTDIR=PHP/zend1"
-    - "TESTDIR=PHP/phreeze"
-    - "TESTDIR=Python/aiohttp"
-    - "TESTDIR=Python/apistar"
-    - "TESTDIR=Python/api_hour"
-    - "TESTDIR=Python/bottle"
-    - "TESTDIR=Python/cherrypy"
-    - "TESTDIR=Python/django"
-    - "TESTDIR=Python/falcon"
-    - "TESTDIR=Python/flask"
-    - "TESTDIR=Python/japronto"
-    - "TESTDIR=Python/klein"
-    - "TESTDIR=Python/morepath"
-    - "TESTDIR=Python/pyramid"
-    - "TESTDIR=Python/sanic"
-    - "TESTDIR=Python/tornado"
-    - "TESTDIR=Python/turbogears"
-    - "TESTDIR=Python/uvicorn"
-    - "TESTDIR=Python/uwsgi"
-    - "TESTDIR=Python/web2py"
-    - "TESTDIR=Python/webware"
-    - "TESTDIR=Python/weppy"
-    - "TESTDIR=Python/wheezyweb"
-    - "TESTDIR=Python/wsgi"
-    - "TESTDIR=Ruby/grape"
-    - "TESTDIR=Ruby/h2o_mruby"
-    - "TESTDIR=Ruby/hanami"
-    - "TESTDIR=Ruby/ngx_mruby"
-    - "TESTDIR=Ruby/padrino"
-    - "TESTDIR=Ruby/rack"
-    - "TESTDIR=Ruby/rack-sequel"
-    - "TESTDIR=Ruby/rails"
-    - "TESTDIR=Ruby/roda-sequel"
-    - "TESTDIR=Ruby/sinatra"
-    - "TESTDIR=Ruby/sinatra-sequel"
-    - "TESTDIR=Rust/iron"
-    - "TESTDIR=Rust/nickel"
-    - "TESTDIR=Rust/hyper"
-    - "TESTDIR=Rust/tokio-minihttp"
-    - "TESTDIR=Rust/rouille"
-    - "TESTDIR=Rust/actix"
-    - "TESTDIR=Scala/akka-http"
-    - "TESTDIR=Scala/blaze"
-    - "TESTDIR=Scala/colossus"
-    - "TESTDIR=Scala/finagle"
-    - "TESTDIR=Scala/finatra"
-    - "TESTDIR=Scala/fintrospect"
-    - "TESTDIR=Scala/play2-scala"
-    - "TESTDIR=Scala/scruffy"
-    - "TESTDIR=Scala/spray"
-    - "TESTDIR=Scala/s-server"
-    - "TESTDIR=Scala/http4s"
-    - "TESTDIR=Scala/finch"
-    - "TESTDIR=Swift/vapor"
-    - "TESTDIR=Ur/urweb"
-    - "TESTDIR=Vala/vsgi"
-    - "TESTDIR=Vala/valum"
+    # - "TESTDIR=C/duda"
+    # - "TESTDIR=C/facil.io"
+    # - "TESTDIR=C/onion"
+    # - "TESTDIR=C/h2o"
+    # - "TESTDIR=C/octane"
+    # - "TESTDIR=C/libreactor"
+    # - "TESTDIR=CSharp/aspnet"
+    # - "TESTDIR=CSharp/aspnetcore"
+    # ## - "TESTDIR=CSharp/aspnet-stripped"
+    # - "TESTDIR=CSharp/evhttp-sharp"
+    # ## - "TESTDIR=CSharp/HttpListener"
+    # - "TESTDIR=CSharp/nancy"
+    # - "TESTDIR=CSharp/revenj"
+    # - "TESTDIR=CSharp/servicestack"
+    # - "TESTDIR=C++/cppcms"
+    # - "TESTDIR=C++/ffead-cpp"
+    # - "TESTDIR=C++/cpoll_cppsp"
+    # - "TESTDIR=C++/cutelyst"
+    # - "TESTDIR=C++/silicon"
+    # - "TESTDIR=C++/treefrog"
+    # - "TESTDIR=C++/ulib"
+    # - "TESTDIR=C++/wt"
+    # - "TESTDIR=C++/poco"
+    # - "TESTDIR=C++/luna"
+    # - "TESTDIR=Clojure/compojure"
+    # - "TESTDIR=Clojure/http-kit"
+    # - "TESTDIR=Clojure/luminus"
+    # - "TESTDIR=Clojure/macchiato"
+    # - "TESTDIR=Clojure/pedestal"
+    # - "TESTDIR=Clojure/aleph"
+    # - "TESTDIR=Clojure/reitit"
+    # - "TESTDIR=Crystal/amber"
+    # - "TESTDIR=Crystal/crystal"
+    # - "TESTDIR=Crystal/kemal"
+    # - "TESTDIR=D/vibed"
+    # - "TESTDIR=D/hunt"
+    # - "TESTDIR=D/collie"
+    # - "TESTDIR=Dart/dart-raw"
+    # - "TESTDIR=Dart/redstone"
+    # - "TESTDIR=Dart/start"
+    # - "TESTDIR=Dart/stream"
+    # - "TESTDIR=Elixir/phoenix"
+    # - "TESTDIR=Elixir/cowboy"
+    # - "TESTDIR=Erlang/chicagoboss"
+    # - "TESTDIR=Erlang/cowboy"
+    # - "TESTDIR=Erlang/elli"
+    # - "TESTDIR=Erlang/mochiweb"
+    # - "TESTDIR=Go/aah"
+    # - "TESTDIR=Go/beego"
+    # - "TESTDIR=Go/echo"
+    # - "TESTDIR=Go/falcore"
+    # - "TESTDIR=Go/fasthttp"
+    # - "TESTDIR=Go/gin"
+    # - "TESTDIR=Go/goji"
+    # - "TESTDIR=Go/go-std"
+    # - "TESTDIR=Go/revel"
+    # - "TESTDIR=Go/webgo"
+    # - "TESTDIR=Groovy/grails"
+    # - "TESTDIR=Groovy/hot"
+    # - "TESTDIR=Haskell/snap"
+    # - "TESTDIR=Haskell/wai"
+    # - "TESTDIR=Haskell/yesod"
+    # - "TESTDIR=Haskell/servant"
+    # - "TESTDIR=Haskell/spock"
+    # - "TESTDIR=Java/act"
+    # - "TESTDIR=Java/activeweb"
+    # - "TESTDIR=Java/baratine"
+    # - "TESTDIR=Java/bayou"
+    # - "TESTDIR=Java/blade"
+    # - "TESTDIR=Java/comsat-servlet"
+    # - "TESTDIR=Java/comsat-webactors"
+    # - "TESTDIR=Java/curacao"
+    # - "TESTDIR=Java/dropwizard"
+     - "TESTDIR=Java/gemini"
+    # - "TESTDIR=Java/grizzly-bm"
+    # - "TESTDIR=Java/grizzly-jersey"
+    # - "TESTDIR=Java/jawn"
+    # - "TESTDIR=Java/jetty"
+    # - "TESTDIR=Java/jlhttp"
+    # - "TESTDIR=Java/jooby"
+    # - "TESTDIR=Java/light-java"
+    # - "TESTDIR=Java/minijax"
+    # - "TESTDIR=Java/netty"
+    # - "TESTDIR=Java/ninja-standalone"
+    # - "TESTDIR=Java/play1"
+    # - "TESTDIR=Java/play2-java"
+    # - "TESTDIR=Java/proteus"
+    # - "TESTDIR=Java/rapidoid"
+    # - "TESTDIR=Java/restexpress"
+    # - "TESTDIR=Java/revenj-jvm"
+    # - "TESTDIR=Java/servlet"
+    # - "TESTDIR=Java/spark"
+    # - "TESTDIR=Java/spring"
+    # - "TESTDIR=Java/tapestry"
+    # - "TESTDIR=Java/undertow"
+    # - "TESTDIR=Java/undertow-jersey-c3p0"
+    # - "TESTDIR=Java/undertow-jersey-hikaricp"
+    # - "TESTDIR=Java/vertx"
+    # - "TESTDIR=Java/vertx-web"
+    # - "TESTDIR=Java/wicket"
+    # - "TESTDIR=Java/wildfly-ee7"
+    # - "TESTDIR=JavaScript/express"
+    # - "TESTDIR=JavaScript/hapi"
+    # - "TESTDIR=JavaScript/koa"
+    # - "TESTDIR=JavaScript/nodejs"
+    # - "TESTDIR=JavaScript/ringojs"
+    # - "TESTDIR=JavaScript/sailsjs"
+    # - "TESTDIR=Kotlin/hexagon"
+    # - "TESTDIR=Kotlin/ktor"
+    # - "TESTDIR=Kotlin/pronghorn"
+    # - "TESTDIR=Lua/lapis"
+    # - "TESTDIR=Lua/octopus"
+    # - "TESTDIR=Lua/openresty"
+    # - "TESTDIR=Nim/jester"
+    # - "TESTDIR=Perl/dancer"
+    # - "TESTDIR=Perl/kelp"
+    # - "TESTDIR=Perl/mojolicious"
+    # - "TESTDIR=Perl/plack"
+    # - "TESTDIR=Perl/web-simple"
+    # - "TESTDIR=PHP/cakephp"
+    # - "TESTDIR=PHP/hhvm"
+    # - "TESTDIR=PHP/php"
+    # - "TESTDIR=PHP/cygnite"
+    # - "TESTDIR=PHP/codeigniter"
+    # - "TESTDIR=PHP/clancats"
+    # - "TESTDIR=PHP/fat-free"
+    # - "TESTDIR=PHP/fuel"
+    # - "TESTDIR=PHP/kohana"
+    # - "TESTDIR=PHP/laravel"
+    # - "TESTDIR=PHP/limonade"
+    # - "TESTDIR=PHP/lithium"
+    # - "TESTDIR=PHP/lumen"
+    # - "TESTDIR=PHP/peachpie"
+    # - "TESTDIR=PHP/phalcon"
+    # - "TESTDIR=PHP/phalcon-micro"
+    # - "TESTDIR=PHP/phpixie"
+    # - "TESTDIR=PHP/silex"
+    # - "TESTDIR=PHP/silex-orm"
+    # - "TESTDIR=PHP/slim"
+    # - "TESTDIR=PHP/symfony"
+    # - "TESTDIR=PHP/workerman"
+    # - "TESTDIR=PHP/yaf"
+    # - "TESTDIR=PHP/yii2"
+    # - "TESTDIR=PHP/zend"
+    # - "TESTDIR=PHP/zend1"
+    # - "TESTDIR=PHP/phreeze"
+    # - "TESTDIR=Python/aiohttp"
+    # - "TESTDIR=Python/apistar"
+    # - "TESTDIR=Python/api_hour"
+    # - "TESTDIR=Python/bottle"
+    # - "TESTDIR=Python/cherrypy"
+    # - "TESTDIR=Python/django"
+    # - "TESTDIR=Python/falcon"
+    # - "TESTDIR=Python/flask"
+    # - "TESTDIR=Python/japronto"
+    # - "TESTDIR=Python/klein"
+    # - "TESTDIR=Python/morepath"
+    # - "TESTDIR=Python/pyramid"
+    # - "TESTDIR=Python/sanic"
+    # - "TESTDIR=Python/tornado"
+    # - "TESTDIR=Python/turbogears"
+    # - "TESTDIR=Python/uvicorn"
+    # - "TESTDIR=Python/uwsgi"
+    # - "TESTDIR=Python/web2py"
+    # - "TESTDIR=Python/webware"
+    # - "TESTDIR=Python/weppy"
+    # - "TESTDIR=Python/wheezyweb"
+    # - "TESTDIR=Python/wsgi"
+    # - "TESTDIR=Ruby/grape"
+    # - "TESTDIR=Ruby/h2o_mruby"
+    # - "TESTDIR=Ruby/hanami"
+    # - "TESTDIR=Ruby/ngx_mruby"
+    # - "TESTDIR=Ruby/padrino"
+    # - "TESTDIR=Ruby/rack"
+    # - "TESTDIR=Ruby/rack-sequel"
+    # - "TESTDIR=Ruby/rails"
+    # - "TESTDIR=Ruby/roda-sequel"
+    # - "TESTDIR=Ruby/sinatra"
+    # - "TESTDIR=Ruby/sinatra-sequel"
+    # - "TESTDIR=Rust/iron"
+    # - "TESTDIR=Rust/nickel"
+    # - "TESTDIR=Rust/hyper"
+    # - "TESTDIR=Rust/tokio-minihttp"
+    # - "TESTDIR=Rust/rouille"
+    # - "TESTDIR=Rust/actix"
+    # - "TESTDIR=Scala/akka-http"
+    # - "TESTDIR=Scala/blaze"
+    # - "TESTDIR=Scala/colossus"
+    # - "TESTDIR=Scala/finagle"
+    # - "TESTDIR=Scala/finatra"
+    # - "TESTDIR=Scala/fintrospect"
+    # - "TESTDIR=Scala/play2-scala"
+    # - "TESTDIR=Scala/scruffy"
+    # - "TESTDIR=Scala/spray"
+    # - "TESTDIR=Scala/s-server"
+    # - "TESTDIR=Scala/http4s"
+    # - "TESTDIR=Scala/finch"
+    # - "TESTDIR=Swift/vapor"
+    # - "TESTDIR=Ur/urweb"
+    # - "TESTDIR=Vala/vsgi"
+    # - "TESTDIR=Vala/valum"
 
 
 before_script:
 before_script:
 
 

+ 3 - 0
deployment/vagrant/bootstrap.sh

@@ -93,4 +93,7 @@ EOF
 
 
   sudo mv motd /etc/
   sudo mv motd /etc/
 
 
+  echo "Setting up client and database machines"
+  tfb --init --quiet
+
 fi
 fi

+ 3 - 3
frameworks/Java/gemini/Docroot/WEB-INF/GeminiHello.conf → frameworks/Java/gemini/Docroot/WEB-INF/gemini-mysql.conf

@@ -81,13 +81,13 @@ DeploymentDescription = Production
 # specific configuration files.
 # specific configuration files.
 
 
 # MySQL/ConnectorJ
 # MySQL/ConnectorJ
-db.Driver.Class = org.postgresql.Driver
-db.Driver.UrlPrefix = jdbc:postgresql://
+db.Driver.Class = com.mysql.jdbc.Driver
+db.Driver.UrlPrefix = jdbc:mysql://
 db.Driver.SupportsAbsolute = yes
 db.Driver.SupportsAbsolute = yes
 db.Driver.SupportsGetRow = yes
 db.Driver.SupportsGetRow = yes
 db.Driver.Jdbc1 = no
 db.Driver.Jdbc1 = no
 
 
-db.ConnectString = 127.0.0.1:5432/hello_world?jdbcCompliantTruncation=false&elideSetAutoCommits=true&useLocalSessionState=true&cachePrepStmts=true&cacheCallableStmts=true&alwaysSendSetIsolation=false&prepStmtCacheSize=4096&cacheServerConfiguration=true&prepStmtCacheSqlLimit=2048&zeroDateTimeBehavior=convertToNull&traceProtocol=false&useServerPrepStmts&enableQueryTimeouts=false&useUnbufferedIO=false&useReadAheadInput=false&maintainTimeStats=false&cacheRSMetadata=true&useSSL=false
+db.ConnectString = TFB-database:3306/hello_world?jdbcCompliantTruncation=false&elideSetAutoCommits=true&useLocalSessionState=true&cachePrepStmts=true&cacheCallableStmts=true&alwaysSendSetIsolation=false&prepStmtCacheSize=4096&cacheServerConfiguration=true&prepStmtCacheSqlLimit=2048&zeroDateTimeBehavior=convertToNull&traceProtocol=false&useServerPrepStmts&enableQueryTimeouts=false&useUnbufferedIO=false&useReadAheadInput=false&maintainTimeStats=false&cacheRSMetadata=true&useSSL=false
 db.LoginName = benchmarkdbuser
 db.LoginName = benchmarkdbuser
 db.LoginPass = benchmarkdbpass
 db.LoginPass = benchmarkdbpass
 
 

+ 531 - 0
frameworks/Java/gemini/Docroot/WEB-INF/gemini.conf

@@ -0,0 +1,531 @@
+# -----------------------------------------------------------------------
+# GEMINIHELLO configuration file
+#
+# BASELINE Configuration
+#
+# This configuration file specifies application default behavior that is
+# suitable where not superseded by deployment-specific configuration
+# files.  Refer to the separate deployment-specific configuration files
+# (such as GeminiHello-Dev.conf).
+#
+# The automatically-generated configuration files align with generic
+# deployment "roles" (Development, Test, and Production).  It is common
+# practice, however, to have additional machine-specific configuration 
+# files.  This allows for configuration attributes to be distributed
+# between so that, for example, all development environments can use
+# attributes specific in a Development configuration file.  For example:
+#
+#    GeminiHello.conf - Baseline configuration
+#    GeminiHello-Dev.conf - Standard development configuration specifics
+#    GeminiHello-BLACKPARK.conf - Specifics for a machine named Blackpark.
+#
+# -----------------------------------------------------------------------
+#
+# AUTO GENERATED CONFIGURATION FILE NOTICE:
+#   Search for lines that contain "EDIT THIS" for AUTO-GENERATED items
+#   that you cam edit for additional functionality.
+#
+# Notes:
+#   Lines that begin with # are comments.  Empty lines are ignored.  Each
+#   entry in this file is described immediately above the provided value.
+#
+#   Directories must have their back-slashes (\) escaped as \\.
+#
+# -----------------------------------------------------------------------
+#
+# STANDARD PROPERTIES
+#   Gemini provides the following standard properties dynamically at
+#   runtime, derived from the deployment environment.
+#
+#   All Servlet Initialization Parameters in the following form:
+#   Servlet.Parameter.(ParamName) = (Value)
+#
+#   All Server Attributes in the following form:
+#   Servlet.Attribute.(AttribtueName) = (Value)
+#
+#   Servlet.ContextName - The deployment context name for the web-app.
+#   Servlet.DeploymentRoot - The physical file system location for the
+#     "document root" of the web-app.
+#   Servlet.WebInf - The physical file system location of the web-app's
+#     WEB-INF directory.
+#   Servlet.MachineName - The discovered machine name of the server.
+#   Servlet.ApplicationRoot - One directory above the DeploymentRoot.
+#
+# -----------------------------------------------------------------------
+
+# -----------------------------------------------------------------------
+# DEPLOYMENT SETTINGS
+# -----------------------------------------------------------------------
+
+# ApplicationRoot
+#   Specifies the file system root of the application.  This value is
+#   not actually used by Gemini, but is used as a macro in several
+#   other property values.
+
+ApplicationRoot = ${Servlet.ApplicationRoot}
+
+# DeploymentDescription
+#   A simple descriptive name or label for this deployment of the
+#   application (e.g., Production, Test, Development, and so on).  This
+#   description is used to identify the installation in some system-
+#   generated messages such as exception report e-mails.
+
+DeploymentDescription = Production
+
+
+# -----------------------------------------------------------------------
+# DATABASE SETTINGS
+# -----------------------------------------------------------------------
+
+db.Enabled = false
+
+# Connection information can be found in the separate deployment-
+# specific configuration files.
+
+# MySQL/ConnectorJ
+# db.Driver.Class = com.mysql.jdbc.Driver
+# db.Driver.UrlPrefix = jdbc:mysql://
+# db.Driver.SupportsAbsolute = yes
+# db.Driver.SupportsGetRow = yes
+# db.Driver.Jdbc1 = no
+
+# db.ConnectString = 10.0.2.15:3306/hello_world?jdbcCompliantTruncation=false&elideSetAutoCommits=true&useLocalSessionState=true&cachePrepStmts=true&cacheCallableStmts=true&alwaysSendSetIsolation=false&prepStmtCacheSize=4096&cacheServerConfiguration=true&prepStmtCacheSqlLimit=2048&zeroDateTimeBehavior=convertToNull&traceProtocol=false&useServerPrepStmts&enableQueryTimeouts=false&useUnbufferedIO=false&useReadAheadInput=false&maintainTimeStats=false&cacheRSMetadata=true&useSSL=false
+# db.LoginName = benchmarkdbuser
+# db.LoginPass = benchmarkdbpass
+
+# JTDS (Open source JDBC driver for Microsoft SQL Server)
+#db.Driver.Class = org.postgresql.Driver
+#db.Driver.UrlPrefix = jdbc:postgresql://
+#db.Driver.SupportsAbsolute = yes
+#db.Driver.SupportsGetRow = yes
+#db.Driver.Jdbc1 = no
+
+# db.Driver.Pooling
+#   How many Connections to maintain to the database.  The default is 1.
+# db.Driver.MaxPooling
+#   How many concurrent Connections to allow at maximum.
+
+# db.Driver.Pooling = 256
+# db.Driver.MaxPooling = 256
+
+# BasicConnectionListener properties
+
+# cl.RetriesOnDisconnect
+#   How many automatic retries on database connection (socket) failures
+#   should be made before aborting a query attempt?
+
+cl.RetriesOnDisconnect = 1
+
+# cl.AlertFile
+#   File to write database connector alerts to.
+
+cl.AlertLogFile = ${ApplicationRoot}/jdbc-alerts.log
+
+
+# -----------------------------------------------------------------------
+# CLUSTER SETTINGS
+# -----------------------------------------------------------------------
+
+# The cluster settings are left commented here.  Gemini clustering can
+# be enabled either (a) by defining cluster connectivity information here
+# in a configuration file or (b) by having application instances fetch
+# their configuration from the cluster master.  In case (b), this
+# configuration file won't be used and there's no point in un-commenting
+# and modifying the lines below.
+
+#ClusterClient.TransportClassname = com.techempower.gemini.cluster.transport.kryonet.KryoClientTransport
+#ClusterClient.Authentication.Key = shared-secret
+#Kryo.Client.Host = localhost
+#Kryo.Client.TcpPort = 54555
+
+
+# -----------------------------------------------------------------------
+# DIRECTORY SETTINGS
+# -----------------------------------------------------------------------
+
+# JSPDirectory
+#   Specifies the relative URL base for JSP files.
+
+JSPDirectory = /WEB-INF/jsp/
+
+# JSPPhysicalDirectory
+#   Specifies the physical directory that is referenced by JSPDirectory
+#   above.
+
+JSPPhysicalDirectory = ${Servlet.WebInf}/jsp/
+
+# HTMLDirectory
+#   Specifies the relative URL base for HTML files.  If HTML files are
+#   served by a separate host, an asbolute URL base can be specified.
+
+HTMLDirectory = /html/
+
+# ImagesDirectory
+#   Specifies the relative URL base for image files.  If images are
+#   served by a separate host, an asbolute URL base can be specified.
+
+ImageDirectory = /images/
+
+# CSSDirectory
+#   Specifies the relative URL base for style sheet files.  If CSS 
+#   files are served by a separate host, an asbolute URL base can be
+#   specified.
+
+CSSDirectory = /css/
+
+# JavaScriptDirectory
+#   Specifies the relative URL base for JavaScript (.js) files.  If
+#   JS files are served by a separate host, an asbolute URL base can be
+#   specified.
+
+JavaScriptDirectory = /js/
+
+# ServletURL
+#   Specifies the relative URL for the dispatcher servlet.  For many
+#   applications, this will be set to the root (just /).  This requires
+#   that the application server direct request to the Servlet only when
+#   the URL matches the root and the root only.  In Resin, the way
+#   this is accomplished is by setting a Servlet Mapping as below:
+#     <servlet-mapping url-regexp='^/$' servlet-name='...'/>
+
+ServletURL = /
+
+
+# -----------------------------------------------------------------------
+# LOGGING SETTINGS
+# -----------------------------------------------------------------------
+
+# Log.File - Log file settings
+# Log.File.On
+#   Enables or disables logging to log files.
+# Log.Console.On
+#   Enables or disables logging to the console.
+
+Log.File.On = no
+Log.Console.On = yes
+
+# Log.File.LogDirectory
+#   Specifies the directory to which logfiles should be written.  By
+#   default this is just a subdirectory named "logs" off of whatever
+#   is the active directory.  The directory must exist for the log files
+#   to work.
+
+Log.File.LogDirectory = ${ApplicationRoot}/Logs/
+
+# Log.File.LogDebugThreshold -and-
+# Log.Console.LogDebugThreshold
+#   Specify the minimum log level (0 to 100, inclusive) to require for
+#   writing items to this log listener.
+#
+# 20 is recommended to start out.
+
+Log.File.LogDebugThreshold = 20
+Log.Console.LogDebugThreshold = 20
+
+
+# -----------------------------------------------------------------------
+# IP/DNS/URL SETTINGS
+# -----------------------------------------------------------------------
+
+# StandardDomain
+#   Species the URL base for the standard (non-secure) domain name.
+#   This should not end with a trailing /, which would otherwise be
+#   standard practice.
+
+# TODO: EDIT THIS
+StandardDomain = http://${Servlet.MachineName}
+
+# SecureDomain
+#   Specifies the URL base for the secure (SSL) domain name.
+#   This should not end with a trailing /, which would otherwise be
+#   standard practice.
+
+# TODO: EDIT THIS
+SecureDomain = https://${Servlet.MachineName}
+
+
+# -----------------------------------------------------------------------
+# LOGIN HANDLER
+# -----------------------------------------------------------------------
+
+# LoginHandler.CmdLogin
+#   The command to recognize for the login page (default: "login"). The
+#   login page can be invoked directly via this command or via a peer
+#   Handler, such as subclasses of SecureHandler.
+
+LoginHandler.CmdLogin = login
+
+# LoginHandler.CmdLogout
+#   The command to recognize for logout requests. The default is
+#   "logout".
+
+LoginHandler.CmdLogout = logout
+
+# LoginHandler.CmdPostLogin
+#   The command to redispatch to after a successful login. This command
+#   will only be used if a prior request was not preempted by a forced
+#   login. The default is "home".
+
+LoginHandler.CmdPostLogin = home
+
+# LoginHandler.JspLogin
+#   The JSP page name to use for rendering the login form. The default
+#   is "login.jsp" within the root of your application's JSP directory.
+
+LoginHandler.JspLogin = accounts/login.jsp
+
+# LoginHandler.JspLogout
+#   The JSP page name to use for rendering the logout screen. The default
+#   is "logout.jsp" within the root of your application's JSP directory.
+
+LoginHandler.JspLogout = accounts/logout.jsp
+
+# LoginHandler.CookieNameSuffix - You can change the suffix of the cookie
+#   name used to save automatic login credentials. The default is
+#   "-automatic-login".
+
+LoginHandler.CookieNameSuffix = -automatic-login
+
+# LoginHandler.LogoutDeletesCookie
+#   Delete automatic login cookie after logout.
+
+LoginHandler.LogoutDeletesCookie = yes
+
+# LoginHandler.EmailTemplateName
+#   You can change the template name of the e-mail used to send a user a
+#   replacement password (in the event that they forget their current
+#   password). The default template name is "E-NewPassword".
+
+LoginHandler.EmailTemplateName = E-NewPassword
+
+# LoginHandler.FailedAttemptLimit
+#   Limits the number of sequential failed attempts before an IP address
+#   cannot make any further login attempts (until a timeout period has
+#   expired).
+
+LoginHandler.FailedAttemptLimit = 25
+
+# LoginHandler.FailedResetSeconds
+#   An interval of time after which an IP address that had been blocked
+#   will be permitted to login again.
+
+LoginHandler.FailedResetSeconds = 60
+
+
+# -----------------------------------------------------------------------
+# STANDARD ADMIN SETTINGS
+# -----------------------------------------------------------------------
+
+# Admin.RelativeJspPath
+#   Specifies the relative path to JSP files for the Admin section.
+
+Admin.RelativeJspPath = admin/
+
+# Admin.ScratchEnabled
+#   Should the admin section allow the execution of arbitrary JSP 
+#   fragments?
+
+Admin.ScratchEnabled = no
+
+# Admin.ScratchIP
+#   If scratches are enabled, from what IPs are they permitted?
+
+Admin.ScratchIP = 127.0.0.1,172.16.98.14
+
+
+# -----------------------------------------------------------------------
+# PYXIS USERS AND GROUPS SETTINGS
+# -----------------------------------------------------------------------
+
+# Pyxis.UsersTable
+#   The name of the Users table for this database.
+Pyxis.UsersTable = GhUsers
+
+# Pyxis.GroupsTable
+#   The name of the Groups table for this database.
+Pyxis.GroupsTable = GhGroups
+
+# Pyxis.UsersToGroupsTable
+#   The name of the table mapping Users to Groups for this database.
+Pyxis.UsersToGroupsTable = MapUserToGroup
+
+# BasicSecurity.PasswordCryptographer
+#   Specifies the fully-qualified classname of a PasswordCryptographer
+#   implementation that can hash user passwords.  A common implementation
+#   is com.techempower.security.BCryptPasswordCryptographer.
+BasicSecurity.PasswordCryptographer = com.techempower.security.BCryptPasswordCryptographer
+
+
+# -----------------------------------------------------------------------
+# JSP PRECOMPILATION (optional; not enabled in this auto-generated file)
+# -----------------------------------------------------------------------
+
+# Precomp.Group1
+#   Defines a comma-separated list of directories (relative URLs) that
+#   contain JSP files (relative to the JSPDirectory).  Multiple groups
+#   can be defined as Group2; Group3; etc.
+#   ex. Precomp.Group1 = /,/includes/,/admin/
+
+#Precomp.Group1 = /
+
+# Precomp.AuthorizedIP
+#   An IP address that is authorized to invoke the Precompilation
+#   process.
+
+#Precomp.AuthorizedIP = 65.115.126.13
+
+
+# -----------------------------------------------------------------------
+# MAIL SETTINGS
+# -----------------------------------------------------------------------
+
+# MailServerCount
+#   The number of mail servers to use in fail-over.  If no fail-over is
+#   used, this should be 1.
+
+MailServerCount = 1
+
+# EmailerThreadsDaemon
+#   Set to no to force e-mail servicer threads to completely deliver their
+#   queues even when the application stops.  Set to yes to allow threads
+#   to stop immediately when the application stops, resulting in the
+#   possibility of undelivered e-mails.
+
+EmailerThreadsDaemon = yes
+
+# Mail Server blocks (where 'X' is a sequential ID of the mail servers
+# used by the application).
+#
+# MailServerX.ServerAddress
+#   Specify each mail server's DNS name or IP address.  Make sure the
+#   mail server will relay e-mail for this application.
+# MailServerX.SmtpPort
+#   Specify an SMTP port number if used for Outbound mail.
+# MailServerX.PopPort
+#   Specify a POP3 port number if used for Inbound mail.
+# MailServerX.Username
+#   Used if an SMTP username is required to send or receive mail
+# MailServerX.Password
+#   The password for the user specified above
+# MailServerX.Role
+#   Outbound, Inbound, or Both
+
+# No mail servers specified in baseline configuration.  See environment-
+# specific configuration files.
+#MailServer1.ServerAddress = mail.techempower.com
+#MailServer1.SmtpPort = 25
+#MailServer1.Username = username
+#MailServer1.Password = password
+#MailServer1.ServerRole = Outbound
+
+# FromEmailAddress
+#   The e-mail address to use as an "author" when sending e-mails.
+
+FromEmailAddress = [email protected]
+
+
+# -----------------------------------------------------------------------
+# ERROR HANDLING SETTINGS
+# -----------------------------------------------------------------------
+
+# Should exceptions caught by the Gemini infrastructure while handling
+# web requests be written to the log file?
+
+BasicExceptionHandler.LogExceptions = yes
+BasicExceptionHandler.LogStackTraces = yes
+
+# Should the stack trace of exceptions caught by the Gemini
+# Infrastructure be clearly visible to users?  This is generally
+# acceptable only in a development environment.  In a Production
+# environment, set this value to 'no'.  Stack traces will still be
+# visible within the HTML source sent to the client.
+
+BasicExceptionHandler.RevealStackTrace = no
+
+# BasicExceptionHandler.ErrorPage
+#   Specifies the filename of the error page that should be rendered
+#   to display an exception.  If empty, the internal "page" will be
+#   used.
+
+BasicExceptionHandler.ErrorPage = error-page.jsp
+
+# Feature.exc-email
+# (Formerly EmailExceptionHandler.Enabled)
+#   The EmailExceptionHandler can be enabled or disabled.
+
+Feature.exc-email = false
+
+# EmailExceptionHandler.ToEmailAddress
+#   The e-mail address to which to send exception reports.
+
+EmailExceptionHandler.ToEmailAddress = [email protected]
+
+# EmailExceptionHandler.FromEmailAddress
+#   The e-mail address from which to send exception reports.
+
+EmailExceptionHandler.FromEmailAddress = [email protected]
+
+# EmailExceptionHandler.MinimumInterval
+#   The minimum number of seconds that must pass since the previous
+#   exception report in order to allow another report to be sent.  This
+#   helps mitigate an avalanche of reports from a critical error.  The
+#   interval is specified in seconds.  A good default is 600 (10
+#   minutes).
+
+EmailExceptionHandler.MinimumInterval = 600
+
+
+# -----------------------------------------------------------------------
+# MISCELLANEOUS SETTINGS
+# -----------------------------------------------------------------------
+
+# Character encoding settings
+#
+# Encoding.RequestCharset
+#   What character encoding are we expecting for requests?
+# Encoding.ResponseType
+#   The full MIME-Type name for responses.
+
+Encoding.RequestCharset = UTF-8
+Encoding.ResponseType = text/html;charset=utf-8
+
+# RequestCounting
+#   Specifies that requests should be counted and that threads
+#   processing requests should be assigned names that include the
+#   request ID number for the scope of the request.
+
+RequestCounting = yes
+
+# SchedulerSleepSeconds
+#   Seconds to sleep between scheduler checks.
+
+SchedulerSleepSeconds = 10
+
+# SessionTimeout
+#   Sets the number of seconds before a session should time out on
+#   the web site.
+
+SessionTimeout = 3600
+
+# SessionTracking
+#   If enabled, sessions will be tracked.  This allows the application
+#   to get a count of active sessions.
+
+SessionTracking = Yes
+
+# ThreadDump.DumpOnStopLocation
+#   Specifies a file system location to write thread dump text files
+#   when the application is unloaded by the application server (such
+#   as when the server stops).
+
+ThreadDump.DumpOnStopLocation = ${ApplicationRoot}/Thread Dumps/
+
+# Robots.File
+#   Specifies the file that should be returned when /robots.txt is 
+#   requested.  See hello.home.RobotsHandler.
+
+Robots.File = ${Servlet.WebInf}/robots-disallowed.txt
+
+# Feature.monitor
+#   Set to yes to enable performance monitoring
+Feature.monitor = no

+ 2 - 2
frameworks/Java/gemini/Docroot/WEB-INF/resin.xml

@@ -3,7 +3,7 @@
 
 
   <cluster id="">
   <cluster id="">
     
     
-    <resin:import path="${RESIN_HOME}/conf/app-default.xml" />
+    <resin:import path="/etc/resin/app-default.xml" />
     <log name="" level="config" path="stdout:" timestamp="[%H:%M:%S.%s] " />
     <log name="" level="config" path="stdout:" timestamp="[%H:%M:%S.%s] " />
 
 
     <server id="">
     <server id="">
@@ -14,7 +14,7 @@
     </server>
     </server>
 
 
     <host>
     <host>
-      <web-app id="/" root-directory="C:/Development/FrameworkBenchmarks/frameworks/Java/gemini/Docroot" />
+      <web-app id="/" root-directory="/gemini/Docroot" />
     </host>
     </host>
 
 
   </cluster>
   </cluster>

+ 0 - 9
frameworks/Java/gemini/Docroot/WEB-INF/web.xml

@@ -22,15 +22,6 @@
   <servlet-mapping url-pattern="/html/*" servlet-name="resin-file" />
   <servlet-mapping url-pattern="/html/*" servlet-name="resin-file" />
   <servlet-mapping url-pattern="favicon.ico" servlet-name="resin-file" />
   <servlet-mapping url-pattern="favicon.ico" servlet-name="resin-file" />
   
   
-  <!-- Assets located in "/static/" are files that should be cached forever, by
-       convention. -->
-  <servlet-mapping url-pattern="/static/*" servlet-name="resin-file" />
-  <resin:Forward regexp="^/static/([\d|-]+)/(css|js|images|html)/(.*)$" target="/$2/$3">
-    <gemini:filter.ExpiresFilter>
-      <expirationDelta>31556926000</expirationDelta>
-    </gemini:filter.ExpiresFilter>
-  </resin:Forward>
-  
   <!-- Disallow Resin 4 from compiling PHP or JSP pages directly. -->
   <!-- Disallow Resin 4 from compiling PHP or JSP pages directly. -->
   <mime-mapping extension=".php" mime-type="text/plain" />
   <mime-mapping extension=".php" mime-type="text/plain" />
   <mime-mapping extension=".jsp" mime-type="text/plain" />
   <mime-mapping extension=".jsp" mime-type="text/plain" />

+ 71 - 69
frameworks/Java/gemini/benchmark_config.json

@@ -1,71 +1,73 @@
 {
 {
   "framework": "gemini",
   "framework": "gemini",
-  "tests": [{
-    "default": {
-      "setup_file": "start",
-      "json_url": "/json",
-      "plaintext_url": "/plaintext",
-      "port": 8080,
-      "approach": "Realistic",
-      "classification": "Fullstack",
-      "database": "None",
-      "framework": "Gemini",
-      "language": "Java",
-      "flavor": "None",
-      "orm": "Micro",
-      "platform": "Servlet",
-      "webserver": "Resin",
-      "os": "Linux",
-      "database_os": "Linux",
-      "display_name": "Gemini",
-      "notes": "",
-      "versus": "servlet"
-    },
-    "mysql": {
-      "setup_file": "start",
-      "db_url": "/db",
-      "query_url": "/query?queries=",
-      "cached_query_url": "/cached_query?queries=",
-      "fortune_url": "/fortunes",
-      "update_url": "/update?queries=",
-      "port": 8080,
-      "approach": "Realistic",
-      "classification": "Fullstack",
-      "database": "MySQL",
-      "framework": "gemini",
-      "language": "Java",
-      "flavor": "None",
-      "orm": "Micro",
-      "platform": "Servlet",
-      "webserver": "Resin",
-      "os": "Linux",
-      "database_os": "Linux",
-      "display_name": "Gemini",
-      "notes": "",
-      "versus": "servlet"
-    },
-    "postgres": {
-      "setup_file": "start_postgresql",
-      "db_url": "/db",
-      "query_url": "/query?queries=",
-      "cached_query_url": "/cached_query?queries=",
-      "fortune_url": "/fortunes",
-      "update_url": "/update?queries=",
-      "port": 8080,
-      "approach": "Realistic",
-      "classification": "Fullstack",
-      "database": "Postgres",
-      "framework": "gemini",
-      "language": "Java",
-      "flavor": "None",
-      "orm": "Micro",
-      "platform": "Servlet",
-      "webserver": "Resin",
-      "os": "Linux",
-      "database_os": "Linux",
-      "display_name": "Gemini",
-      "notes": "",
-      "versus": "servlet"
-    }   
-  }]
-}
+  "tests": [
+    {
+      "default": {
+        "setup_file": "gemini.dockerfile",
+        "json_url": "/json",
+        "plaintext_url": "/plaintext",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Fullstack",
+        "database": "None",
+        "framework": "Gemini",
+        "language": "Java",
+        "flavor": "None",
+        "orm": "Micro",
+        "platform": "Servlet",
+        "webserver": "Resin",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Gemini",
+        "notes": "",
+        "versus": "servlet"
+      },
+      "mysql": {
+        "setup_file": "gemini-mysql.dockerfile",
+        "db_url": "/db",
+        "query_url": "/query?queries=",
+        "cached_query_url": "/cached_query?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/update?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Fullstack",
+        "database": "MySQL",
+        "framework": "gemini",
+        "language": "Java",
+        "flavor": "None",
+        "orm": "Micro",
+        "platform": "Servlet",
+        "webserver": "Resin",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Gemini",
+        "notes": "",
+        "versus": "servlet"
+      },
+      "postgres": {
+        "setup_file": "start_postgresql",
+        "db_url": "/db",
+        "query_url": "/query?queries=",
+        "cached_query_url": "/cached_query?queries=",
+        "fortune_url": "/fortunes",
+        "update_url": "/update?queries=",
+        "port": 8080,
+        "approach": "Realistic",
+        "classification": "Fullstack",
+        "database": "Postgres",
+        "framework": "gemini",
+        "language": "Java",
+        "flavor": "None",
+        "orm": "Micro",
+        "platform": "Servlet",
+        "webserver": "Resin",
+        "os": "Linux",
+        "database_os": "Linux",
+        "display_name": "Gemini",
+        "notes": "",
+        "versus": "servlet"
+      }
+    }
+  ]
+}

+ 18 - 0
frameworks/Java/gemini/gemini-mysql.dockerfile

@@ -0,0 +1,18 @@
+FROM resin:latest
+
+RUN apt-get install -qqy -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
+    ant
+
+ADD Docroot/ /gemini/Docroot
+ADD Source/ /gemini/Source
+ADD build.xml /gemini/
+ADD ivy.xml /gemini/
+ADD ivysettings.xml /gemini/
+
+RUN cd /gemini/Docroot/WEB-INF; mv gemini-mysql.conf GeminiHello.conf;
+
+RUN cd /gemini; mkdir -p Docroot/WEB-INF/classes; mkdir -p Docroot/WEB-INF/lib; ant resolve; ant compile
+
+EXPOSE 8080
+
+CMD ["resinctl", "-conf", "/gemini/Docroot/WEB-INF/resin.xml", "console"]

+ 18 - 0
frameworks/Java/gemini/gemini.dockerfile

@@ -0,0 +1,18 @@
+FROM resin:latest
+
+RUN apt-get install -qqy -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
+    ant
+
+ADD Docroot/ /gemini/Docroot
+ADD Source/ /gemini/Source
+ADD build.xml /gemini/
+ADD ivy.xml /gemini/
+ADD ivysettings.xml /gemini/
+
+RUN cd /gemini/Docroot/WEB-INF; mv gemini.conf GeminiHello.conf;
+
+RUN cd /gemini; mkdir -p Docroot/WEB-INF/classes; mkdir -p Docroot/WEB-INF/lib; ant resolve; ant compile
+
+EXPOSE 8080
+
+CMD ["resinctl", "-conf", "/gemini/Docroot/WEB-INF/resin.xml", "console"]

+ 0 - 12
frameworks/Java/gemini/start.sh

@@ -1,12 +0,0 @@
-#!/bin/bash
-
-fw_depends mysql java resin maven ant
-
-sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':3306/|g' Docroot/WEB-INF/GeminiHello.conf
-sed -i 's|root-directory=".*/FrameworkBenchmarks/frameworks/Java/gemini|root-directory="'"$TROOT"'|g' Docroot/WEB-INF/resin.xml
-sed -i 's|db.Driver.Class = .*|db.Driver.Class = com.mysql.jdbc.Driver|g' Docroot/WEB-INF/GeminiHello.conf
-sed -i 's|db.Driver.UrlPrefix = .*|db.Driver.UrlPrefix = jdbc:mysql://|g' Docroot/WEB-INF/GeminiHello.conf
-mkdir -p Docroot/WEB-INF/classes
-ant resolve
-ant compile
-resinctl -conf $TROOT/Docroot/WEB-INF/resin.xml console

+ 0 - 12
frameworks/Java/gemini/start_postgresql.sh

@@ -1,12 +0,0 @@
-#!/bin/bash
-
-fw_depends postgresql java ant resin maven
-
-sed -i 's|db.ConnectString = .*/|db.ConnectString = '"$DBHOST"':5432/|g' Docroot/WEB-INF/GeminiHello.conf
-sed -i 's|root-directory=".*/FrameworkBenchmarks/frameworks/Java/gemini|root-directory="'"$TROOT"'|g' Docroot/WEB-INF/resin.xml
-sed -i 's|db.Driver.Class = .*|db.Driver.Class = org.postgresql.Driver|g' Docroot/WEB-INF/GeminiHello.conf
-sed -i 's|db.Driver.UrlPrefix = .*|db.Driver.UrlPrefix = jdbc:postgresql://|g' Docroot/WEB-INF/GeminiHello.conf
-mkdir -p Docroot/WEB-INF/classes
-ant resolve
-ant compile
-resinctl -conf $TROOT/Docroot/WEB-INF/resin.xml console

+ 68 - 49
toolset/benchmark/benchmarker.py

@@ -408,6 +408,15 @@ class Benchmarker:
     # End __setup_database
     # End __setup_database
     ############################################################
     ############################################################
 
 
+    ############################################################
+    ############################################################
+    def __setup_database_container(self, database, port):
+        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        (out,err) = p.communicate("docker run -d --rm -p %s:%s --network=host %s" % (port,port,database))
+        return out.splitlines()[len(out.splitlines()) - 1]
+    ############################################################
+    ############################################################
+
     ############################################################
     ############################################################
     # Makes any necessary changes to the client machine that
     # Makes any necessary changes to the client machine that
     # should be made before running the tests. Is very similar
     # should be made before running the tests. Is very similar
@@ -510,14 +519,6 @@ class Benchmarker:
     # are needed.
     # are needed.
     ############################################################
     ############################################################
     def __run_test(self, test):
     def __run_test(self, test):
-
-        # Used to capture return values
-        def exit_with_code(code):
-            if self.os.lower() == 'windows':
-                return code
-            else:
-                sys.exit(code)
-
         logDir = os.path.join(self.full_results_directory(), test.name.lower())
         logDir = os.path.join(self.full_results_directory(), test.name.lower())
         try:
         try:
             os.makedirs(logDir)
             os.makedirs(logDir)
@@ -527,12 +528,12 @@ class Benchmarker:
 
 
             if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
             if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                 out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
                 out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
-                return exit_with_code(0)
+                return sys.exit(0)
 
 
             # If the test is in the excludes list, we skip it
             # If the test is in the excludes list, we skip it
             if self.exclude != None and test.name in self.exclude:
             if self.exclude != None and test.name in self.exclude:
                 out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                 out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
-                return exit_with_code(0)
+                return sys.exit(0)
 
 
             out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
             out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
             out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
             out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
@@ -541,7 +542,7 @@ class Benchmarker:
             if self.results['frameworks'] != None and test.name in self.results['completed']:
             if self.results['frameworks'] != None and test.name in self.results['completed']:
                 out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                 out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                 print('WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name)))
                 print('WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name)))
-                return exit_with_code(1)
+                return sys.exit(1)
             out.flush()
             out.flush()
 
 
             out.write(header("Beginning %s" % test.name, top='='))
             out.write(header("Beginning %s" % test.name, top='='))
@@ -552,8 +553,9 @@ class Benchmarker:
             ##########################
             ##########################
             out.write(header("Starting %s" % test.name))
             out.write(header("Starting %s" % test.name))
             out.flush()
             out.flush()
+            database_container_id = None
             try:
             try:
-                self.__cleanup_leftover_processes_before_test()
+                # self.__cleanup_leftover_processes_before_test()
 
 
                 if self.__is_port_bound(test.port):
                 if self.__is_port_bound(test.port):
                     time.sleep(60)
                     time.sleep(60)
@@ -564,17 +566,29 @@ class Benchmarker:
                     out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                     out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                     out.flush()
                     out.flush()
                     print("Error: Unable to recover port, cannot start test")
                     print("Error: Unable to recover port, cannot start test")
-                    return exit_with_code(1)
+                    return sys.exit(1)
+
+                ##########################
+                # Start database container
+                ##########################
+                if test.database != "None":
+                    # TODO: this is horrible... how should we really do it?
+                    ports = {
+                        "mysql": 3306
+                    }
+                    database_container_id = self.__setup_database_container(test.database.lower(), ports[test.database.lower()])
 
 
-                result, process = test.start(out)
-                self.__process = process
+                ##########################
+                # Start webapp
+                ##########################
+                result = test.start(out)
                 if result != 0:
                 if result != 0:
-                    self.__process.terminate()
+                    self.__stop_test(test, out)
                     time.sleep(5)
                     time.sleep(5)
                     out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                     out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                     out.flush()
                     out.flush()
-                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
-                    return exit_with_code(1)
+                    self.__write_intermediate_results(test.name,"ERROR: Problem starting")
+                    return sys.exit(1)
 
 
                 logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
                 logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
                 time.sleep(self.sleep)
                 time.sleep(self.sleep)
@@ -602,7 +616,9 @@ class Benchmarker:
                 ##########################
                 ##########################
                 # Stop this test
                 # Stop this test
                 ##########################
                 ##########################
-                self.__stop_test(test, out)
+                self.__stop_test(database_container_id, test, out)
+                if test.database != "None":
+                    self.__stop_database(database_container_id, out)
 
 
                 out.write(header("Stopped %s" % test.name))
                 out.write(header("Stopped %s" % test.name))
                 out.flush()
                 out.flush()
@@ -637,52 +653,54 @@ class Benchmarker:
 
 
                 if self.mode == "verify" and not passed_verify:
                 if self.mode == "verify" and not passed_verify:
                     print("Failed verify!")
                     print("Failed verify!")
-                    return exit_with_code(1)
+                    return sys.exit(1)
             except KeyboardInterrupt:
             except KeyboardInterrupt:
-                self.__stop_test(test, out)
+                self.__stop_test(database_container_id, test, out)
+                if test.database is not None:
+                    self.__stop_database(database_container_id, out)
             except (OSError, IOError, subprocess.CalledProcessError) as e:
             except (OSError, IOError, subprocess.CalledProcessError) as e:
                 self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                 self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                 out.write(header("Subprocess Error %s" % test.name))
                 out.write(header("Subprocess Error %s" % test.name))
                 traceback.print_exc(file=out)
                 traceback.print_exc(file=out)
                 out.flush()
                 out.flush()
                 out.close()
                 out.close()
-                return exit_with_code(1)
+                return sys.exit(1)
 
 
             out.close()
             out.close()
-            return exit_with_code(0)
+            return sys.exit(0)
 
 
     ############################################################
     ############################################################
     # End __run_tests
     # End __run_tests
     ############################################################
     ############################################################
 
 
     ############################################################
     ############################################################
-    # __stop_test
-    # Attempts to stop the running test.
+    # __stop_database
+    # Attempts to stop the running database container.
     ############################################################
     ############################################################
-    def __stop_test(self, test, out):
-        # self.__process may not be set if the user hit ctrl+c prior to the test
-        # starting properly.
-        if self.__process is not None:
-            out.write(header("Stopping %s" % test.name))
-            out.flush()
-            # Ask TFBReaper to nicely terminate itself
-            self.__process.terminate()
-            slept = 0
-            returnCode = None
-            # Check once a second to see if TFBReaper has exited
-            while(slept < 300 and returnCode is None):
-                time.sleep(1)
-                slept += 1
-                returnCode = self.__process.poll()
-            
-            # If TFBReaper has not exited at this point, we have a problem
-            if returnCode is None:
-                self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
-                out.write(header("Error: Port %s was not released by stop - %s" % (test.port, test.name)))
-                out.write(header("Running Processes"))
-                out.write(subprocess.check_output(['ps -aux'], shell=True))
-                out.flush()
-                return exit_with_code(1)
+    def __stop_database(self, database_container_id, out):
+        if database_container_id:
+            p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=self.quiet_out, stderr=subprocess.STDOUT)
+            p.communicate("docker stop %s" % database_container_id)
+
+    ############################################################
+    # __stop_test
+    # Attempts to stop the running test container.
+    ############################################################
+    def __stop_test(self, database_container_id, test, out):
+        docker_ids = subprocess.check_output(["docker", "ps", "-q"]).splitlines()
+        for docker_id in docker_ids:
+            # This check is in case the database and server machines are the same
+            if docker_id:
+                if not database_container_id or docker_id not in database_container_id:
+                    subprocess.check_output(["docker", "kill", docker_id])
+                    slept = 0
+                    while(slept < 300 and docker_id is ''):
+                        time.sleep(1)
+                        slept += 1
+                        docker_id = subprocess.check_output(["docker", "ps", "-q"]).strip()
+                    # We still need to sleep a bit before removing the image
+                    # time.sleep(5)
+                    # subprocess.check_output(["docker", "image", "rm", test.name])
     ############################################################
     ############################################################
     # End __stop_test
     # End __stop_test
     ############################################################
     ############################################################
@@ -972,6 +990,7 @@ class Benchmarker:
         if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
         if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
             os.system("sudo rm -rf " + self.result_directory + "/*")
             os.system("sudo rm -rf " + self.result_directory + "/*")
 
 
+        # TODO: remove this as installs goes away with docker implementation
         # remove installs directories if --clean-all provided
         # remove installs directories if --clean-all provided
         self.install_root = "%s/%s" % (self.fwroot, "installs")
         self.install_root = "%s/%s" % (self.fwroot, "installs")
         if args['clean_all']:
         if args['clean_all']:

+ 109 - 82
toolset/benchmark/framework_test.py

@@ -22,6 +22,7 @@ from threading import Thread
 from threading import Event
 from threading import Event
 
 
 from utils import header
 from utils import header
+from utils import gather_docker_dependencies
 
 
 # Cross-platform colored text
 # Cross-platform colored text
 from colorama import Fore, Back, Style
 from colorama import Fore, Back, Style
@@ -173,70 +174,70 @@ class FrameworkTest:
 
 
     # Setup environment variables
     # Setup environment variables
     logDir = os.path.join(self.fwroot, self.benchmarker.full_results_directory(), 'logs', self.name.lower())
     logDir = os.path.join(self.fwroot, self.benchmarker.full_results_directory(), 'logs', self.name.lower())
-    bash_functions_path= os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
+    # bash_functions_path= os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
 
 
-    os.environ['TROOT'] = self.directory
-    os.environ['IROOT'] = self.install_root
-    os.environ['DBHOST'] = socket.gethostbyname(self.database_host)
-    os.environ['LOGDIR'] = logDir
-    os.environ['MAX_CONCURRENCY'] = str(max(self.benchmarker.concurrency_levels))
+    # os.environ['TROOT'] = self.directory
+    # os.environ['IROOT'] = self.install_root
+    # os.environ['DBHOST'] = socket.gethostbyname(self.database_host)
+    # os.environ['LOGDIR'] = logDir
+    # os.environ['MAX_CONCURRENCY'] = str(max(self.benchmarker.concurrency_levels))
 
 
     # Always ensure that IROOT exists
     # Always ensure that IROOT exists
-    if not os.path.exists(self.install_root):
-      os.mkdir(self.install_root)
+    # if not os.path.exists(self.install_root):
+    #   os.mkdir(self.install_root)
 
 
-    if not os.path.exists(os.path.join(self.install_root,"TFBReaper")):
-      subprocess.check_call(['gcc', 
-        '-std=c99', 
-        '-o%s/TFBReaper' % self.install_root, 
-        os.path.join(self.fwroot,'toolset/setup/linux/TFBReaper.c')],
-        stderr=out, stdout=out)
+    # if not os.path.exists(os.path.join(self.install_root,"TFBReaper")):
+    #   subprocess.check_call(['gcc', 
+    #     '-std=c99', 
+    #     '-o%s/TFBReaper' % self.install_root, 
+    #     os.path.join(self.fwroot,'toolset/setup/linux/TFBReaper.c')],
+    #     stderr=out, stdout=out)
 
 
     # Check that the client is setup
     # Check that the client is setup
-    if not os.path.exists(os.path.join(self.install_root, 'client.installed')):
-      print("\nINSTALL: Installing client software\n")    
-      # TODO: hax; should dynamically know where this file is
-      with open (self.fwroot + "/toolset/setup/linux/client.sh", "r") as myfile:
-        remote_script=myfile.read()
-        print("\nINSTALL: {!s}".format(self.benchmarker.client_ssh_string))
-        p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" ") + ["bash"], stdin=subprocess.PIPE)
-        p.communicate(remote_script)
-        returncode = p.returncode
-        if returncode != 0:
-          self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.client_ssh_string))
-      print("\nINSTALL: Finished installing client software\n")
-      subprocess.check_call('touch client.installed', shell=True, cwd=self.install_root, executable='/bin/bash')
+    # if not os.path.exists(os.path.join(self.install_root, 'client.installed')):
+    #   print("\nINSTALL: Installing client software\n")    
+    #   # TODO: hax; should dynamically know where this file is
+    #   with open (self.fwroot + "/toolset/setup/linux/client.sh", "r") as myfile:
+    #     remote_script=myfile.read()
+    #     print("\nINSTALL: {!s}".format(self.benchmarker.client_ssh_string))
+    #     p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" ") + ["bash"], stdin=subprocess.PIPE)
+    #     p.communicate(remote_script)
+    #     returncode = p.returncode
+    #     if returncode != 0:
+    #       self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.client_ssh_string))
+    #   print("\nINSTALL: Finished installing client software\n")
+    #   subprocess.check_call('touch client.installed', shell=True, cwd=self.install_root, executable='/bin/bash')
 
 
     # Run the module start inside parent of TROOT
     # Run the module start inside parent of TROOT
     #  - we use the parent as a historical accident, a number of tests
     #  - we use the parent as a historical accident, a number of tests
     # refer to their TROOT maually still
     # refer to their TROOT maually still
-    previousDir = os.getcwd()
-    os.chdir(os.path.dirname(self.troot))
-    logging.info("Running setup module start (cwd=%s)", self.directory)
-
-    command = 'bash -exc "source %s && source %s.sh"' % (
-      bash_functions_path,
-      os.path.join(self.troot, self.setup_file))
-
-    debug_command = '''\
-      export FWROOT=%s          &&  \\
-      export TROOT=%s           &&  \\
-      export IROOT=%s           &&  \\
-      export DBHOST=%s          &&  \\
-      export LOGDIR=%s          &&  \\
-      export MAX_CONCURRENCY=%s && \\
-      cd %s && \\
-      %s/TFBReaper "bash -exc \\\"source %s && source %s.sh\\\"''' % (self.fwroot,
-        self.directory,
-        self.install_root,
-        socket.gethostbyname(self.database_host),
-        logDir,
-        max(self.benchmarker.concurrency_levels),
-        self.directory,
-        self.install_root,
-        bash_functions_path,
-        os.path.join(self.troot, self.setup_file))
-    logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
+    # previousDir = os.getcwd()
+    # os.chdir(os.path.dirname(self.troot))
+    # logging.info("Running setup module start (cwd=%s)", self.directory)
+
+    # command = 'bash -exc "source %s && source %s.sh"' % (
+    #   bash_functions_path,
+    #   os.path.join(self.troot, self.setup_file))
+
+    # debug_command = '''\
+    #   export FWROOT=%s          &&  \\
+    #   export TROOT=%s           &&  \\
+    #   export IROOT=%s           &&  \\
+    #   export DBHOST=%s          &&  \\
+    #   export LOGDIR=%s          &&  \\
+    #   export MAX_CONCURRENCY=%s && \\
+    #   cd %s && \\
+    #   %s/TFBReaper "bash -exc \\\"source %s && source %s.sh\\\"''' % (self.fwroot,
+    #     self.directory,
+    #     self.install_root,
+    #     socket.gethostbyname(self.database_host),
+    #     logDir,
+    #     max(self.benchmarker.concurrency_levels),
+    #     self.directory,
+    #     self.install_root,
+    #     bash_functions_path,
+    #     os.path.join(self.troot, self.setup_file))
+    # logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
 
 
 
 
     def tee_output(prefix, line):
     def tee_output(prefix, line):
@@ -253,13 +254,52 @@ class FrameworkTest:
       out.write(line)
       out.write(line)
       out.flush()
       out.flush()
 
 
-    # Start the setup.sh command
-    p = subprocess.Popen(["%s/TFBReaper" % self.install_root,command],
-          cwd=self.directory,
+    prefix = "Setup %s: " % self.name
+
+    ##########################
+    # Build the Docker images
+    ##########################
+    test_docker_file = os.path.join(self.directory, self.setup_file)
+    deps = list(reversed(gather_docker_dependencies( test_docker_file )))
+
+    docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup", "linux", "docker")
+
+    for dependency in deps:
+      docker_file = os.path.join(docker_dir, dependency + ".dockerfile")
+      p = subprocess.Popen(["docker", "build", "-f", docker_file, "-t", dependency, docker_dir],
+          stdout=subprocess.PIPE,
+          stderr=subprocess.STDOUT)
+      nbsr = setup_util.NonBlockingStreamReader(p.stdout)
+      while (p.poll() is None):
+        for i in xrange(10):
+          try:
+            line = nbsr.readline(0.05)
+            if line:
+              tee_output(prefix, line)
+          except setup_util.EndOfStream:
+            break
+      p = subprocess.Popen(["docker", "build", "-f", test_docker_file, "-t", self.name, self.directory],
+          stdout=subprocess.PIPE,
+          stderr=subprocess.STDOUT)
+      nbsr = setup_util.NonBlockingStreamReader(p.stdout)
+      while (p.poll() is None):
+        for i in xrange(10):
+          try:
+            line = nbsr.readline(0.05)
+            if line:
+              tee_output(prefix, line)
+          except setup_util.EndOfStream:
+            break
+        
+
+    ##########################
+    # Run the Docker container
+    ##########################
+    p = subprocess.Popen(["docker", "run", "--rm", "-p", "%s:%s" % (self.port, self.port), "--network=host", self.name],
           stdout=subprocess.PIPE,
           stdout=subprocess.PIPE,
           stderr=subprocess.STDOUT)
           stderr=subprocess.STDOUT)
     nbsr = setup_util.NonBlockingStreamReader(p.stdout,
     nbsr = setup_util.NonBlockingStreamReader(p.stdout,
-      "%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))
+      "%s: framework processes have terminated" % self.name)
 
 
     # Set a limit on total execution time of setup.sh
     # Set a limit on total execution time of setup.sh
     timeout = datetime.now() + timedelta(minutes = 105)
     timeout = datetime.now() + timedelta(minutes = 105)
@@ -268,18 +308,10 @@ class FrameworkTest:
     # Need to print to stdout once every 10 minutes or Travis-CI will abort
     # Need to print to stdout once every 10 minutes or Travis-CI will abort
     travis_timeout = datetime.now() + timedelta(minutes = 5)
     travis_timeout = datetime.now() + timedelta(minutes = 5)
 
 
-    # Flush output until setup.sh work is finished. This is
-    # either a) when setup.sh exits b) when the port is bound
-    # c) when we run out of time. Note that 'finished' doesn't
-    # guarantee setup.sh process is dead - the OS may choose to make
-    # setup.sh a zombie process if it still has living children
-    #
-    # Note: child processes forked (using &) will remain alive
-    # after setup.sh has exited. The will have inherited the
-    # stdout/stderr descriptors and will be directing their
-    # output to the pipes.
-    #
-    prefix = "Setup %s: " % self.name
+    # Flush output until docker run work is finished. This is
+    # either a) when docker run exits b) when the port is bound
+    # c) when we run out of time. 
+    prefix = "Server %s: " % self.name
     while (p.poll() is None
     while (p.poll() is None
       and not self.benchmarker.is_port_bound(self.port)
       and not self.benchmarker.is_port_bound(self.port)
       and not time_remaining.total_seconds() < 0):
       and not time_remaining.total_seconds() < 0):
@@ -289,9 +321,8 @@ class FrameworkTest:
       # print one line per condition check.
       # print one line per condition check.
       # Adding a tight loop here mitigates the effect,
       # Adding a tight loop here mitigates the effect,
       # ensuring that most of the output directly from
       # ensuring that most of the output directly from
-      # setup.sh is sent to tee_output before the outer
-      # loop exits and prints things like "setup.sh exited"
-      #
+      # docker is sent to tee_output before the outer
+      # loop exits and prints things like "docker exited"
       for i in xrange(10):
       for i in xrange(10):
         try:
         try:
           line = nbsr.readline(0.05)
           line = nbsr.readline(0.05)
@@ -301,7 +332,7 @@ class FrameworkTest:
             # Reset Travis-CI timer
             # Reset Travis-CI timer
             travis_timeout = datetime.now() + timedelta(minutes = 5)
             travis_timeout = datetime.now() + timedelta(minutes = 5)
         except setup_util.EndOfStream:
         except setup_util.EndOfStream:
-          tee_output(prefix, "Setup has terminated\n")
+          tee_output(prefix, "Docker has terminated\n")
           break
           break
       time_remaining = timeout - datetime.now()
       time_remaining = timeout - datetime.now()
 
 
@@ -314,18 +345,18 @@ class FrameworkTest:
 
 
     # Did we time out?
     # Did we time out?
     if time_remaining.total_seconds() < 0:
     if time_remaining.total_seconds() < 0:
-      tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
+      tee_output(prefix, "Docker run has timed out!! Aborting...\n" % self.setup_file)
       p.kill()
       p.kill()
       return 1
       return 1
 
 
     # What's our return code?
     # What's our return code?
-    # If setup.sh has terminated, use that code
+    # If docker run has terminated, use that code
     # Otherwise, detect if the port was bound
     # Otherwise, detect if the port was bound
     tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
     tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
       p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
       p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
     retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
     retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
     if p.poll() is not None:
     if p.poll() is not None:
-      tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll()))
+      tee_output(prefix, "Docker run process exited naturally with %s\n" % (self.setup_file, p.poll()))
     elif self.benchmarker.is_port_bound(self.port):
     elif self.benchmarker.is_port_bound(self.port):
       tee_output(prefix, "Bound port detected on %s\n" % self.port)
       tee_output(prefix, "Bound port detected on %s\n" % self.port)
 
 
@@ -335,7 +366,6 @@ class FrameworkTest:
     # the subprocess.PIPEs are dead, this thread will terminate.
     # the subprocess.PIPEs are dead, this thread will terminate.
     # Use a different prefix to indicate this is the framework
     # Use a different prefix to indicate this is the framework
     # speaking
     # speaking
-    prefix = "Server %s: " % self.name
     def watch_child_pipes(nbsr, prefix):
     def watch_child_pipes(nbsr, prefix):
       while True:
       while True:
         try:
         try:
@@ -351,10 +381,7 @@ class FrameworkTest:
     watch_thread.daemon = True
     watch_thread.daemon = True
     watch_thread.start()
     watch_thread.start()
 
 
-    logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
-    os.chdir(previousDir)
-
-    return retcode, p
+    return retcode
   ############################################################
   ############################################################
   # End start
   # End start
   ############################################################
   ############################################################

+ 3 - 3
toolset/benchmark/test_types/framework_test_type.py

@@ -146,7 +146,7 @@ class FrameworkTestType:
 
 
         if database_name == "mysql":
         if database_name == "mysql":
             try:
             try:
-                db = MySQLdb.connect(os.environ.get("DBHOST"), "benchmarkdbuser", "benchmarkdbpass", "hello_world")
+                db = MySQLdb.connect("TFB-database", "benchmarkdbuser", "benchmarkdbpass", "hello_world")
                 cursor = db.cursor()
                 cursor = db.cursor()
                 cursor.execute("SELECT * FROM World")
                 cursor.execute("SELECT * FROM World")
                 results = cursor.fetchall()
                 results = cursor.fetchall()
@@ -157,7 +157,7 @@ class FrameworkTestType:
                 print(e)
                 print(e)
         elif database_name == "postgres":
         elif database_name == "postgres":
             try:
             try:
-                db = psycopg2.connect(host=os.environ.get("DBHOST"),
+                db = psycopg2.connect(host="TFB-database",
                                       port="5432",
                                       port="5432",
                                       user="benchmarkdbuser",
                                       user="benchmarkdbuser",
                                       password="benchmarkdbpass",
                                       password="benchmarkdbpass",
@@ -177,7 +177,7 @@ class FrameworkTestType:
         elif database_name == "mongodb":
         elif database_name == "mongodb":
             try:
             try:
                 worlds_json = {}
                 worlds_json = {}
-                connection = pymongo.MongoClient(host=os.environ.get("DBHOST"))
+                connection = pymongo.MongoClient(host="TFB-database")
                 db = connection.hello_world
                 db = connection.hello_world
                 for world in db.world.find():
                 for world in db.world.find():
                     if "randomNumber" in world:
                     if "randomNumber" in world:

+ 25 - 1
toolset/benchmark/utils.py

@@ -6,6 +6,31 @@ import socket
 
 
 from ast import literal_eval
 from ast import literal_eval
 
 
+def gather_docker_dependencies(docker_file):
+    '''
+    Gathers all the known docker dependencies for the given docker image.
+    '''
+    # Avoid setting up a circular import
+    from setup.linux import setup_util
+    deps = []
+
+    if os.path.exists(docker_file):
+        with open(docker_file) as fp:
+            line = fp.readline()
+            if line:
+                tokens = line.strip().split(' ')
+                if tokens[0] == "FROM":
+                    # This is magic that our base image points to
+                    if tokens[1] != "ubuntu:16.04":
+                        depTokens = tokens[1].strip().split(':')
+                        deps.append(depTokens[0])
+                        dep_docker_file = os.path.join(setup_util.get_fwroot(), 
+                            "toolset", "setup", "linux", "docker", depTokens[0] + ".dockerfile")
+                        deps.extend(gather_docker_dependencies(dep_docker_file))
+
+    return deps
+
+
 def gather_langauges():
 def gather_langauges():
     '''
     '''
     Gathers all the known languages in the suite via the folder names
     Gathers all the known languages in the suite via the folder names
@@ -20,7 +45,6 @@ def gather_langauges():
         langs.append(dir.replace(lang_dir,"")[1:])
         langs.append(dir.replace(lang_dir,"")[1:])
     return langs
     return langs
 
 
-
 def gather_tests(include = [], exclude=[], benchmarker=None):
 def gather_tests(include = [], exclude=[], benchmarker=None):
     '''
     '''
     Given test names as strings, returns a list of FrameworkTest objects.
     Given test names as strings, returns a list of FrameworkTest objects.

+ 144 - 0
toolset/initializer.py

@@ -0,0 +1,144 @@
+import subprocess, os
+from setup.linux import setup_util
+
+DEVNULL = open(os.devnull, 'w')
+
+def initialize(args):
+  fwroot = setup_util.get_fwroot()
+  dbuser = args.database_user
+  dbhost = args.database_host
+  dbiden = args.database_identity_file
+  cluser = args.client_user
+  clhost = args.client_host
+  cliden = args.client_identity_file
+  aphost = args.server_host
+
+  # test ssh connections to all the machines
+  client_conn = __check_connection(cluser, clhost, cliden, aphost)
+  database_conn = __check_connection(dbuser, dbhost, dbiden, aphost)
+
+  conn_success = client_conn and database_conn
+  if not conn_success and not args.quiet:
+    return __print_failure()
+  
+  # set up client machine
+  if not __init_client(fwroot, cluser, clhost, cliden, args.quiet) and not args.quiet:
+    return __print_failure()
+
+
+  # set up database software
+  if not __init_database(fwroot, dbuser, dbhost, dbiden, args.quiet) and not args.quiet:
+    return __print_failure()
+  else:
+    # set up database docker images
+    if not __build_database_docker_images(fwroot, dbuser, dbhost, dbiden, args.quiet) and not args.quiet:
+      return __print_failure()
+
+def __print_failure():
+  print("""
+-------------------------------------------------------------------------------
+  This wizard is intended to help configure the required software on all the
+  machines in the ecosystem specified in benchmark.cfg.
+
+  Note: It is expected that you have already set up passwordless-sudo on all
+  of the machines (app, database, client) as well as identity file based 
+  authentication and hostname setup in your hosts file. 
+  More information on this required setup can be found at:
+
+  frameworkbenchmarks.readthedocs.io/en/latest/Development/Installation-Guide/
+
+  Please ensure that your benchmark.cfg is correctly configured as well as all
+  of the machines (app, database, client).
+-------------------------------------------------------------------------------""")
+
+def __ssh_string(user, host, identity_file):
+  return ["ssh", "-T", "-o", "StrictHostKeyChecking=no", "%s@%s" % (user, host), "-i", identity_file]
+
+def __scp_string(user, host, identity_file, files):
+  scpstr = ["scp", "-i", identity_file]
+  for file in files:
+    scpstr.append(file)
+  scpstr.append("%s@%s:~/" % (user, host))
+  return scpstr
+  
+def __check_connection(user, host, identity_file, app_host):
+  ''' 
+  Checks that the given user and host are accessible via ssh with the given
+  identity file and have the the following permissions:
+    1. passwordless sudo
+    2. ability to ssh back to app machine
+  '''
+  client_conn = True
+  try:
+    p = subprocess.Popen(__ssh_string(user, host, identity_file), 
+      stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
+    p.communicate("ssh -T -o StrictHostKeyChecking=no %s" % app_host)
+    if p.returncode:
+      client_conn = False
+  except Exception as e:
+    client_conn = False
+  return client_conn
+
+def __init_client(fwroot, user, host, identity_file, quiet):
+  '''
+  Initializes and configures the software required to run the suite on the 
+  client machine.
+  '''
+  if not quiet:
+    print("INSTALL: Installing client software")
+  with open (os.path.join(fwroot, "toolset", "setup", "linux", "client.sh"), "r") as myfile:
+    remote_script=myfile.read()
+    if quiet:
+      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
+        stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
+    else:
+      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
+        stdin=subprocess.PIPE)
+    p.communicate(remote_script)
+    return p.returncode == 0
+
+def __init_database(fwroot, user, host, identity_file, quiet):
+  '''
+  Initializes and configures the software required to run the suite on the
+  database machine.
+  '''
+  if not quiet:
+    print("INSTALL: Installing database software")
+  with open(os.path.join(fwroot, "toolset", "setup", "linux", "database.sh"), "r") as myfile:
+    remote_script=myfile.read()
+    if quiet:
+      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
+        stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
+    else:
+      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
+        stdin=subprocess.PIPE)
+    p.communicate(remote_script)
+    return p.returncode == 0
+
+def __build_database_docker_images(fwroot, user, host, identity_file, quiet):
+  '''
+  Transfers all the files required by each database to the database machine and
+  builds the docker image for each on the database machine.
+  '''
+  if not quiet:
+    print("INSTALL: Building database docker images")
+
+  returncode = 0
+  databases_path = os.path.join(fwroot, "toolset", "setup", "linux", "docker", "databases")
+  for database in os.listdir(databases_path):
+    dbpath = os.path.join(databases_path, database)
+    dbfiles = ""
+    for dbfile in os.listdir(dbpath):
+      dbfiles += "%s " % os.path.join(dbpath,dbfile)
+    p = subprocess.Popen(__scp_string(user, host, identity_file, dbfiles.split()),
+      stdin=subprocess.PIPE)
+    p.communicate()
+    returncode += p.returncode
+
+    if p.returncode == 0:
+      p = subprocess.Popen(__ssh_string(user, host, identity_file),
+        stdin=subprocess.PIPE)
+      p.communicate("docker build -f ~/%s.dockerfile -t %s ~/" % (database, database))
+      returncode += p.returncode
+
+  return returncode == 0

+ 18 - 11
toolset/run-tests.py

@@ -13,6 +13,7 @@ from benchmark.benchmarker import Benchmarker
 from setup.linux.unbuffered import Unbuffered
 from setup.linux.unbuffered import Unbuffered
 from setup.linux import setup_util
 from setup.linux import setup_util
 from scaffolding import Scaffolding
 from scaffolding import Scaffolding
+from initializer import initialize
 from ast import literal_eval
 from ast import literal_eval
 
 
 # Enable cross-platform colored output
 # Enable cross-platform colored output
@@ -72,8 +73,6 @@ def main(argv=None):
     # App server cpu count
     # App server cpu count
     os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())
     os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())
 
 
-    print("FWROOT is {!s}.".format(os.environ['FWROOT']))
-
     conf_parser = argparse.ArgumentParser(
     conf_parser = argparse.ArgumentParser(
         description=__doc__,
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter,
         formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -138,9 +137,20 @@ def main(argv=None):
         ''')
         ''')
 
 
     # Install options
     # Install options
+    parser.add_argument('--init', action='store_true', default=False, help='Initializes the benchmark environment')
+
+    # Suite options
     parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
     parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
-    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
     parser.add_argument('--new', action='store_true', default=False, help='Initialize a new framework test')
     parser.add_argument('--new', action='store_true', default=False, help='Initialize a new framework test')
+    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
+    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')
+    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
+    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
+    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
+    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
+
+    # TODO: remove this; install dir goes away with docker
+    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
 
 
     # Test options
     # Test options
     parser.add_argument('--test', nargs='+', help='names of tests to run')
     parser.add_argument('--test', nargs='+', help='names of tests to run')
@@ -154,18 +164,15 @@ def main(argv=None):
     parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
     parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
     parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
     parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
 
 
-    # Misc Options
-    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
-    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
-    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
-    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
-    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
-    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')
     parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
     parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
     args = parser.parse_args(remaining_argv)
     args = parser.parse_args(remaining_argv)
 
 
     if args.new:
     if args.new:
-        Scaffolding()
+        Scaffolding().scaffold()
+        return 0
+
+    if args.init:
+        initialize(args)
         return 0
         return 0
 
 
     benchmarker = Benchmarker(vars(args))
     benchmarker = Benchmarker(vars(args))

+ 1 - 1
toolset/scaffolding.py

@@ -8,7 +8,7 @@ from setup.linux.setup_util import replace_text
 from benchmark.utils import gather_frameworks, gather_langauges
 from benchmark.utils import gather_frameworks, gather_langauges
 
 
 class Scaffolding:
 class Scaffolding:
-  def __init__(self):
+  def scaffold(self):
     print("""
     print("""
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
     This wizard is intended to help build the scaffolding required for a new 
     This wizard is intended to help build the scaffolding required for a new 

+ 0 - 155
toolset/setup/linux/TFBReaper.c

@@ -1,155 +0,0 @@
-#define _DEFAULT_SOURCE
-
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/wait.h>
-#include <sys/prctl.h>
-#include <string.h>
-
-typedef struct Node Node;
-
-/**
- * Simple linked-list struct.
- */
-struct Node
-{
-  char *str; 
-  Node *next; 
-};
-
-/**
- * References to the head and tail of the linked-list.
- */
-Node *head = NULL;
-Node *tail = NULL;
-
-/**
- * Reap will recursively find all processes with this process
- * as an ancestor, and kill them.
- */
-void reap(int signum)
-{
-  int pid = getpid();
-
-  FILE *fp;
-  char buf[256];
-
-  char command[256];
-  sprintf(command, "findChilds() { for child in $(ps --ppid $1 ho pid); do echo $child; findChilds $child; done } && findChilds %d", pid);
-
-  int count;
-
-  do
-  {
-    count = 0;
-    char *pids[256];
-    fp = popen(command, "r");
-    while(fgets(buf, sizeof(buf), fp) != 0)
-    {
-      Node *newNode = malloc(sizeof(Node));
-      newNode->str = malloc(strlen(buf)+1);
-      strcpy(newNode->str, buf);
-      newNode->next = NULL;
-
-      if(tail == NULL)
-      {
-        tail = newNode;
-        head = newNode;
-      }
-      else
-      {
-        if(head->next == NULL)
-        {
-          head->next = newNode;
-        }
-        tail->next = newNode;
-        tail = newNode;
-      }
-      count ++;
-    }
-
-    Node *curr = head;
-    while(curr != NULL)
-    {
-      kill(atoi(curr->str), SIGKILL);
-      waitpid(atoi(curr->str), NULL, 0);
-      curr = curr->next;
-    }
-  }
-  // This may seem magical, but that command from above always results in two
-  // additionally PIDs: one for `ps` and one for `sh`. Therefore, all of the
-  // lineage of this TFBReaper have been successfully killed once there are
-  // only two PIDs counted in the loop.
-  // This loop is necessary for edge cases where there is a master->slave 
-  // lineage and TFBReaper kills a slave first, which is observed and fixed
-  // by the master by spawning a NEW slave in the original's place, and then
-  // killing the master (thus orphaning the newly spawned slave, but that PID
-  // is not in our master list).
-  while(count > 2);
-
-  exit(0);
-}
-
-int main(int argc, char *argv[])
-{
-  // Interrupt SIGTERM and SIGINT and pass to our handler.
-  struct sigaction action;
-  memset(&action, 0, sizeof(action));
-  action.sa_handler = reap;
-  sigaction(SIGTERM, &action, NULL);
-  sigaction(SIGINT, &action, NULL);
-
-  // Gather the command line arguments for the pass-through.
-  int count = argc - 1;
-  int *sizes = malloc(sizeof(int) * count);
-  int total_size = 0;
-  for( int i = 1; i < argc; i++ ) {
-    sizes[i - 1] = strlen(argv[i]);
-    total_size += sizes[i - 1];
-  }
-  char *result = malloc(sizeof(char) * total_size + count);
-  char *ptr = result;
-  for( int i = 1; i < argc; i++ ) {
-    memcpy(ptr, argv[i], sizes[i - 1]);
-    ptr[sizes[i - 1]] = ' ';
-    ptr += sizes[i - 1] + 1;
-  }
-  *ptr = '\0';
-  free(sizes);
-
-  // Here is the magic. This sets any child processes to
-  // use THIS process as a 'subreaper'. What that means is
-  // even if the process uses the fork-exit technicque for
-  // running a daemon (which normally orphans the process
-  // and causes init(1) to adopt it, which is problematic
-  // for TFB because we cannot then generally kill the
-  // process since it has lost all context available to us)
-  // the child process will have the parent id of THIS
-  // process, allowing us to kill all the processes started
-  // by the suite in this way generally.
-  //
-  // See: http://man7.org/linux/man-pages/man2/prctl.2.html
-  prctl(PR_SET_CHILD_SUBREAPER,1);
-
-  // This invokes whatever was passed as arguments to TFBReaper
-  // on the system. This program is merely a pass-through to
-  // a shell with the subreaper stuff enabled.
-  int ret = system(result);
-
-  // We need to wait forever; the suite will clean this 
-  // process up later.
-  if (ret == 0) {
-    for(;;) { 
-      // Pause to keep us from spiking CPU; whenever a signal
-      // occurs (except SIGTERM etc which will kill this process)
-      // just iterate and pause again.
-      pause(); 
-    }
-  }
-
-  // If the scripts failed, we should return that code.
-  return ret;
-}
-

+ 3 - 3
toolset/setup/linux/client.sh

@@ -1,8 +1,5 @@
 #!/bin/bash
 #!/bin/bash
 
 
-export DB_HOST={database_host}
-
-set -x
 export DEBIAN_FRONTEND=noninteractive
 export DEBIAN_FRONTEND=noninteractive
 
 
 ##############################
 ##############################
@@ -15,6 +12,7 @@ sudo apt-get -y update
 sudo apt-get -y install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
 sudo apt-get -y install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
     build-essential git libev-dev libpq-dev libreadline6-dev
     build-essential git libev-dev libpq-dev libreadline6-dev
 
 
+# TODO: this is likely going away with docker implementation (docker provides a ulimit cli arg)
 sudo sh -c "echo '*               -    nofile          65535' >> /etc/security/limits.conf"
 sudo sh -c "echo '*               -    nofile          65535' >> /etc/security/limits.conf"
 
 
 ##############################
 ##############################
@@ -47,3 +45,5 @@ request = function()
   return req
   return req
 end
 end
 EOF
 EOF
+
+echo "Successfully installed software on client machine!"

+ 36 - 0
toolset/setup/linux/database.sh

@@ -0,0 +1,36 @@
+#!/bin/bash
+
+export DEBIAN_FRONTEND=noninteractive
+
+#############################
+# Prerequisites
+#############################
+sudo apt-get -y update
+
+# WARNING: DON'T PUT A SPACE AFTER ANY BACKSLASH OR APT WILL BREAK
+# Dpkg::Options avoid hangs on Travis-CI, doesn't affect clean systems
+sudo apt-get -y install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
+  linux-image-extra-$(uname -r) `# Needed for Docker on Ubuntu 14` \
+  linux-image-extra-virtual     `# Needed for Docker on Ubuntu 14` \
+  apt-transport-https           `# Needed for Docker on Ubuntu 14` \
+  ca-certificates               `# Needed for Docker on Ubuntu 14` \
+  curl                          `# Needed for Docker on Ubuntu 14` \
+  software-properties-common    `# Needed for Docker on Ubuntu 14`
+
+# TODO: this is likely going away with docker implementation (docker provides a ulimit cli arg)
+sudo sh -c "echo '*               -    nofile          65535' >> /etc/security/limits.conf"
+
+#
+# Install Docker
+#
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+   $(lsb_release -cs) \
+   stable"
+sudo apt-get update
+sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"  docker-ce
+if ! sudo grep -q -E "^docker:" /etc/group; then
+  sudo groupadd docker
+  sudo usermod -aG docker $USER
+fi

+ 2 - 0
toolset/setup/linux/databases/mysql/create.sql

@@ -61,3 +61,5 @@ INSERT INTO fortune (message) VALUES ('Feature: A bug with seniority.');
 INSERT INTO fortune (message) VALUES ('Computers make very fast, very accurate mistakes.');
 INSERT INTO fortune (message) VALUES ('Computers make very fast, very accurate mistakes.');
 INSERT INTO fortune (message) VALUES ('<script>alert("This should not be displayed in a browser alert box.");</script>');
 INSERT INTO fortune (message) VALUES ('<script>alert("This should not be displayed in a browser alert box.");</script>');
 INSERT INTO fortune (message) VALUES ('フレームワークのベンチマーク');
 INSERT INTO fortune (message) VALUES ('フレームワークのベンチマーク');
+
+FLUSH PRIVILEGES;

+ 12 - 0
toolset/setup/linux/docker/TFBReaper/Cargo.toml

@@ -0,0 +1,12 @@
+[package]
+
+name = "tfb_reaper"
+version = "0.0.1"
+authors = [ "Mike Smith <[email protected]>" ]
+
+[dependencies]
+
+prctl = "1.0.0"
+nix = "0.8.1"
+libc = "0.2"
+procinfo = "0.4.2"

+ 118 - 0
toolset/setup/linux/docker/TFBReaper/src/main.rs

@@ -0,0 +1,118 @@
+extern crate prctl;
+extern crate nix;
+extern crate libc;
+extern crate procinfo;
+
+use std::process;
+use std::process::Command;
+use std::env;
+use std::fs;
+
+use nix::sys::signal;
+
+use libc::pause;
+
+/**
+ * Recurssively finds all descendant PIDs of the given PID.
+ */
+fn find_descendants(pid: i32) -> Vec<i32> {
+  let mut pids: Vec<i32> = vec![];
+
+  for path in fs::read_dir("/proc").unwrap() {
+    let filename = path.unwrap().file_name().into_string().ok().unwrap();
+    let apid = match filename.parse::<i32>() {
+      Ok(apid) => apid,
+      Err(_) => -1
+    };
+    
+    if apid > 0 {
+      let stat = procinfo::pid::stat(apid).unwrap();
+      if stat.ppid == pid {
+
+        pids.push(stat.ppid);
+        pids.append(&mut find_descendants(stat.pid));
+      }
+    }
+  }
+
+  return pids;
+}
+
+/**
+ * Reap will kill all descendants of this process.
+ */
+extern fn reap(_:i32) {
+  unsafe {
+    let mut done = false;
+    while !done {
+      let pids: Vec<i32> = find_descendants(libc::getpid());
+
+      if pids.len() == 0 {
+        done = true;
+      }
+
+      for pid in pids {
+        libc::kill(pid, libc::SIGKILL);
+      }
+    }
+  }
+
+  process::exit(0);
+}
+
+/**
+ * main
+ */
+fn main() {
+  // Interrupt SIGTERM and SIGINT and pass to our handler.
+  let sig_action = signal::SigAction::new(
+    signal::SigHandler::Handler(reap),
+    signal::SaFlags::empty(),
+    signal::SigSet::empty());
+  unsafe {
+    signal::sigaction(signal::SIGINT, &sig_action).unwrap();
+    signal::sigaction(signal::SIGTERM, &sig_action).unwrap();
+  }
+
+  // Here is the magic. This sets any child processes to
+  // use THIS process as a 'subreaper'. What that means is
+  // even if the process uses the fork-exit technicque for
+  // running a daemon (which normally orphans the process
+  // and causes init(1) to adopt it, which is problematic
+  // for TFB because we cannot then generally kill the
+  // process since it has lost all context available to us)
+  // the child process will have the parent id of THIS
+  // process, allowing us to kill all the processes started
+  // by the suite in this way generally.
+  //
+  // See: http://man7.org/linux/man-pages/man2/prctl.2.html
+  prctl::set_child_subreaper(true).unwrap();
+
+  // Gather the command line arguments for the pass-through.
+  let args: Vec<_> = env::args().collect();
+  if args.len() > 1 {
+    // This invokes whatever was passed as arguments to TFBReaper
+    // on the system. This program is merely a pass-through to
+    // a shell with the subreaper stuff enabled.
+    let status = Command::new(&args[1])
+            .args(&args[2..])
+            .status()
+            .expect("Failed to execute");
+
+    // We need to wait forever; the suite will clean this 
+    // process up later.
+    if status.success() {
+      loop {
+        unsafe {
+          // Pause to keep us from spiking CPU; whenever a signal
+          // occurs (except SIGTERM etc which will kill this process)
+          // just iterate and pause again.
+          pause();
+        }
+      }
+    }
+
+    // If the scripts failed, we should return that code.
+    process::exit(status.code().unwrap());
+  }
+}

+ 63 - 0
toolset/setup/linux/docker/databases/mysql/create.sql

@@ -0,0 +1,63 @@
+# To maintain consistency across servers and fix a problem with the jdbc per
+# http://stackoverflow.com/questions/37719818/the-server-time-zone-value-aest-is-unrecognized-or-represents-more-than-one-ti
+SET GLOBAL time_zone = '+00:00';
+
+# modified from SO answer http://stackoverflow.com/questions/5125096/for-loop-in-mysql
+DROP DATABASE IF EXISTS hello_world;
+CREATE DATABASE hello_world;
+USE hello_world;
+
+DROP TABLE IF EXISTS world;
+CREATE TABLE  world (
+  id int(10) unsigned NOT NULL auto_increment,
+  randomNumber int NOT NULL default 0,
+  PRIMARY KEY  (id)
+)
+ENGINE=INNODB;
+GRANT SELECT, UPDATE ON hello_world.world TO 'benchmarkdbuser'@'%' IDENTIFIED BY 'benchmarkdbpass';
+GRANT SELECT, UPDATE ON hello_world.world TO 'benchmarkdbuser'@'localhost' IDENTIFIED BY 'benchmarkdbpass';
+
+DROP PROCEDURE IF EXISTS load_data;
+
+DELIMITER #
+CREATE PROCEDURE load_data()
+BEGIN
+
+declare v_max int unsigned default 10000;
+declare v_counter int unsigned default 0;
+
+  TRUNCATE TABLE world;
+  START TRANSACTION;
+  while v_counter < v_max do
+    INSERT INTO world (randomNumber) VALUES ( floor(0 + (rand() * 10000)) );
+    SET v_counter=v_counter+1;
+  end while;
+  commit;
+END #
+
+DELIMITER ;
+
+CALL load_data();
+
+DROP TABLE IF EXISTS fortune;
+CREATE TABLE  fortune (
+  id int(10) unsigned NOT NULL auto_increment,
+  message varchar(2048) CHARACTER SET 'utf8' NOT NULL,
+  PRIMARY KEY  (id)
+)
+ENGINE=INNODB;
+GRANT SELECT ON hello_world.fortune TO 'benchmarkdbuser'@'%' IDENTIFIED BY 'benchmarkdbpass';
+GRANT SELECT ON hello_world.fortune TO 'benchmarkdbuser'@'localhost' IDENTIFIED BY 'benchmarkdbpass';
+
+INSERT INTO fortune (message) VALUES ('fortune: No such file or directory');
+INSERT INTO fortune (message) VALUES ('A computer scientist is someone who fixes things that aren''t broken.');
+INSERT INTO fortune (message) VALUES ('After enough decimal places, nobody gives a damn.');
+INSERT INTO fortune (message) VALUES ('A bad random number generator: 1, 1, 1, 1, 1, 4.33e+67, 1, 1, 1');
+INSERT INTO fortune (message) VALUES ('A computer program does what you tell it to do, not what you want it to do.');
+INSERT INTO fortune (message) VALUES ('Emacs is a nice operating system, but I prefer UNIX. — Tom Christaensen');
+INSERT INTO fortune (message) VALUES ('Any program that runs right is obsolete.');
+INSERT INTO fortune (message) VALUES ('A list is only as strong as its weakest link. — Donald Knuth');
+INSERT INTO fortune (message) VALUES ('Feature: A bug with seniority.');
+INSERT INTO fortune (message) VALUES ('Computers make very fast, very accurate mistakes.');
+INSERT INTO fortune (message) VALUES ('<script>alert("This should not be displayed in a browser alert box.");</script>');
+INSERT INTO fortune (message) VALUES ('フレームワークのベンチマーク');

+ 76 - 0
toolset/setup/linux/docker/databases/mysql/my.cnf

@@ -0,0 +1,76 @@
+#######################
+# client              #
+#######################
+
+[client]
+port            = 3306
+socket          = /var/run/mysqld/mysqld.sock
+
+#######################
+# mysqld              #
+#######################
+
+[mysqld]
+#
+# * Basic Settings
+#
+default-storage-engine = innodb
+
+user            = mysql
+pid-file        = /var/run/mysqld/mysqld.pid
+socket          = /var/run/mysqld/mysqld.sock
+port            = 3306
+skip-external-locking
+skip-name-resolve
+lower_case_table_names = 1
+
+character-set-server=utf8
+collation-server=utf8_general_ci
+
+#
+# * Fine Tuning
+#
+
+key_buffer_size         = 16M
+max_allowed_packet      = 16M
+thread_stack            = 256K
+thread_cache_size       = 128
+max_connections         = 5000
+back_log                = 5000
+table_open_cache        = 800
+table_definition_cache  = 800
+max_heap_table_size     = 128M
+tmp_table_size          = 128M
+
+#
+# innodb settings
+#
+
+innodb_use_native_aio   = 1
+# sync for every sec. not for every commit.
+innodb_flush_log_at_trx_commit = 2
+innodb_flush_method=O_DIRECT
+innodb_buffer_pool_instances=14
+
+sync_binlog=0
+
+#
+# * Query Cache Configuration
+#
+query_cache_type         = 0
+#query_cache_limit        = 1M
+#query_cache_size         = 64M
+#query_cache_size = 0
+#query_cache_min_res_unit = 1K
+max_prepared_stmt_count  = 1048576
+
+#######################
+# mysqldump           #
+#######################
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet      = 16M
+
+!includedir /etc/mysql/conf.d/

+ 41 - 0
toolset/setup/linux/docker/databases/mysql/mysql.dockerfile

@@ -0,0 +1,41 @@
+FROM ubuntu:16.04
+
+RUN apt-get update
+RUN apt-get install -qqy locales
+
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8  
+ENV LANGUAGE en_US:en  
+ENV LC_ALL en_US.UTF-8 
+
+ADD create.sql create.sql
+ADD my.cnf my.cnf
+ADD mysql.list mysql.list
+
+RUN cp mysql.list /etc/apt/sources.list.d/
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8C718D3B5072E1F5
+RUN apt-get update
+RUN ["/bin/bash", "-c", "debconf-set-selections <<< \"mysql-community-server mysql-community-server/data-dir select 'Y'\""]
+RUN ["/bin/bash", "-c", "debconf-set-selections <<< \"mysql-community-server mysql-community-server/root-pass password secret\""]
+RUN ["/bin/bash", "-c", "debconf-set-selections <<< \"mysql-community-server mysql-community-server/re-root-pass password secret\""]
+RUN DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-server
+
+RUN mv /etc/mysql/my.cnf /etc/mysql/my.cnf.orig
+RUN cp my.cnf /etc/mysql/my.cnf
+
+RUN rm -rf /ssd/mysql
+RUN rm -rf /ssd/log/mysql
+RUN cp -R -p /var/lib/mysql /ssd/
+RUN cp -R -p /var/log/mysql /ssd/log
+
+# It may seem weird that we call `service mysql start` several times, but the RUN
+# directive is a 1-time operation for building this image. Subsequent RUN calls
+# do not see running processes from prior RUN calls; therefor, each command here
+# that relies on the mysql server running will explicitly start the server and
+# perform the work required.
+RUN service mysql start && mysqladmin -uroot -psecret flush-hosts
+RUN service mysql start && mysql -uroot -psecret < create.sql
+
+EXPOSE 3306
+
+CMD ["mysqld"]

+ 7 - 0
toolset/setup/linux/docker/databases/mysql/mysql.list

@@ -0,0 +1,7 @@
+# You may comment out entries below, but any other modifications may be lost.
+# Use command 'dpkg-reconfigure mysql-apt-config' as root for modifications.
+deb http://repo.mysql.com/apt/ubuntu/ trusty mysql-apt-config
+deb http://repo.mysql.com/apt/ubuntu/ trusty mysql-5.7
+deb http://repo.mysql.com/apt/ubuntu/ trusty mysql-tools
+#deb http://repo.mysql.com/apt/ubuntu/ trusty mysql-tools-preview
+deb-src http://repo.mysql.com/apt/ubuntu/ trusty mysql-5.7

+ 12 - 0
toolset/setup/linux/docker/java.dockerfile

@@ -0,0 +1,12 @@
+FROM tfb:latest
+
+RUN add-apt-repository -y ppa:openjdk-r/ppa
+RUN apt-get update
+RUN apt-get install -qqy -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
+    openjdk-8-jdk
+
+# https://bugs.launchpad.net/ubuntu/+source/ca-certificates-java/+bug/1396760
+RUN /var/lib/dpkg/info/ca-certificates-java.postinst configure
+
+ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+ENV PATH="/usr/lib/jvm/java-8-openjdk-amd64/bin:${PATH}"

+ 7 - 0
toolset/setup/linux/docker/resin.dockerfile

@@ -0,0 +1,7 @@
+FROM java:latest
+
+ENV RESIN_HOME=/resin-4.0.55
+
+RUN curl -sLO http://www.caucho.com/download/resin-4.0.55.tar.gz
+RUN tar xf resin-4.0.55.tar.gz
+RUN cd resin-4.0.55; ./configure; make; make install

+ 15 - 0
toolset/setup/linux/docker/tfb.dockerfile

@@ -0,0 +1,15 @@
+FROM ubuntu:16.04
+
+RUN apt-get update
+RUN apt-get install -qqy software-properties-common build-essential curl locales
+
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8  
+ENV LANGUAGE en_US:en  
+ENV LC_ALL en_US.UTF-8 
+
+ADD TFBReaper TFBReaper
+
+RUN mv TFBReaper/target/debug/tfb_reaper /
+
+ENTRYPOINT ["/tfb_reaper"]

+ 54 - 20
toolset/setup/linux/prerequisites.sh

@@ -9,32 +9,66 @@ sudo apt-get -yq update
 
 
 # WARNING: DONT PUT A SPACE AFTER ANY BACKSLASH OR APT WILL BREAK
 # WARNING: DONT PUT A SPACE AFTER ANY BACKSLASH OR APT WILL BREAK
 sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
 sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
-  cmake build-essential automake    `# Needed for building code` \
-  curl wget unzip                   `# Common tools` \
-  software-properties-common        `# Needed for add-apt-repository` \
-  git-core mercurial                `# Version control systems` \
-  libpcre3 libpcre3-dev libpcrecpp0 `# Regular expression support` \
-  libssl-dev libcurl4-openssl-dev   `# SSL libraries` \
-  libmysqlclient-dev \
-  zlib1g-dev python-software-properties \
-  libreadline6-dev \
-  libbz2-dev \
-  libyaml-dev libxml2-dev \
-  libxslt-dev libgdbm-dev ncurses-dev  \
-  libffi-dev htop libtool bison libevent-dev \
-  libgstreamer-plugins-base0.10-0 libgstreamer0.10-0 \
-  liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev \
-  libjson0-dev libmcrypt-dev libicu-dev gettext \
-  libpq-dev mlton \
-  cloc dstat                        `# Collect resource usage statistics` \
+  git-core \
+  cloc dstat                    `# Collect resource usage statistics` \
   python-dev \
   python-dev \
-  python-pip re2c libnuma-dev
+  python-pip \
+  python-software-properties \
+  libmysqlclient-dev            `# Needed for MySQL-python` \
+  libpq-dev                     `# Needed for psycopg2` \
+  linux-image-extra-$(uname -r) `# Needed for Docker on Ubuntu 14` \
+  linux-image-extra-virtual     `# Needed for Docker on Ubuntu 14` \
+  apt-transport-https           `# Needed for Docker on Ubuntu 14` \
+  ca-certificates               `# Needed for Docker on Ubuntu 14` \
+  curl                          `# Needed for Docker on Ubuntu 14` \
+  software-properties-common    `# Needed for Docker on Ubuntu 14`
+  # cmake build-essential automake    `# Needed for building code` \
+  # wget unzip                   `# Common tools` \
+  # mercurial                `# Version control systems` \
+  # libpcre3 libpcre3-dev libpcrecpp0 `# Regular expression support` \
+  # libssl-dev libcurl4-openssl-dev   `# SSL libraries` \
+  # zlib1g-dev \
+  # libreadline6-dev \
+  # libbz2-dev \
+  # libyaml-dev libxml2-dev \
+  # libxslt-dev libgdbm-dev ncurses-dev  \
+  # libffi-dev htop libtool bison libevent-dev \
+  # libgstreamer-plugins-base0.10-0 libgstreamer0.10-0 \
+  # liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev \
+  # libjson0-dev libmcrypt-dev libicu-dev gettext \
+  #  mlton \
+  # re2c libnuma-dev
 
 
 sudo pip install colorama==0.3.1
 sudo pip install colorama==0.3.1
 # Version 2.3 has a nice Counter() and other features
 # Version 2.3 has a nice Counter() and other features
 # but it requires --allow-external and --allow-unverified
 # but it requires --allow-external and --allow-unverified
 sudo pip install progressbar==2.2 requests MySQL-python psycopg2 pymongo
 sudo pip install progressbar==2.2 requests MySQL-python psycopg2 pymongo
 
 
+#
+# Install Rust
+#
+curl -sL https://sh.rustup.rs -o rustup.sh
+chmod 777 rustup.sh
+./rustup.sh -y
+rm ./rustup.sh
+source ~/.profile
+cd toolset/setup/linux/docker/TFBReaper
+cargo build
+cd ../../../../..
+
+#
+# Install Docker
+#
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+   $(lsb_release -cs) \
+   stable"
+sudo apt-get update
+sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"  docker-ce
+sudo groupadd docker
+sudo usermod -aG docker $USER
+
 # Get the ulimit from the benchmark config
 # Get the ulimit from the benchmark config
 if [ -f benchmark.cfg ]; then
 if [ -f benchmark.cfg ]; then
   FILE=benchmark.cfg
   FILE=benchmark.cfg
@@ -58,4 +92,4 @@ tfb() {
   $(pwd)/toolset/run-tests.py "\$@"
   $(pwd)/toolset/run-tests.py "\$@"
 }
 }
 EOF
 EOF
-source /etc/profile.d/tfb.sh
+source /etc/profile.d/tfb.sh

+ 2 - 0
toolset/travis/travis_setup.sh

@@ -23,3 +23,5 @@ echo 127.0.0.1 TFB-client   | sudo tee --append /etc/hosts
 echo 127.0.0.1 TFB-server   | sudo tee --append /etc/hosts
 echo 127.0.0.1 TFB-server   | sudo tee --append /etc/hosts
 
 
 source ./toolset/setup/linux/prerequisites.sh
 source ./toolset/setup/linux/prerequisites.sh
+
+tfb --init --quiet