
Broke everything (#3350)

* Broke everything

* Fixin' bugs and removing annoying comments

* Fixed the broken stuff

This is COMPLETELY backwards. Instead of gathering the
metadata and only creating framework_test objects based
on said metadata, we gather ALL the metadata for ALL
test implementations and then create a framework_test
for each, but parsing 'fails' for tests not specified.

I returned it to this craziness because it was less
friction, but I want to come back and fix this properly
some day.
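
A minimal sketch of the flow described above, using hypothetical names (gather_all_metadata, FrameworkTestStub) rather than the toolset's real API: filter the gathered metadata down to the tests that were actually specified before constructing any test objects, instead of constructing one per implementation and letting parsing "fail" for the rest.

    # Hypothetical sketch only; these names do not match the real toolset.
    class FrameworkTestStub(object):
        """Stand-in for the real framework_test object."""

        def __init__(self, metadata, config):
            self.name = metadata["name"]
            self.metadata = metadata
            self.config = config


    def build_requested_tests(gather_all_metadata, requested_names, config):
        # 1. Gather metadata for every test implementation on disk.
        all_metadata = gather_all_metadata()

        # 2. Keep only the tests the user actually asked to run.
        wanted = [m for m in all_metadata if m["name"] in requested_names]

        # 3. Only now build one test object per surviving metadata entry.
        return [FrameworkTestStub(m, config) for m in wanted]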

* Linter

* Fixed formatting across python files
* Fixed namespacing
* Fixed executable PYTHONPATH

* Benchmarker.py now benchmarks

* Better filename convention

Or at least it's more consistent

* docker-helper now in charge of docker

* Capitals... why!?

* More linting

* More linting

* More linting

* Restructuring everything

With this I deleted the windows and sqlserver folders.
Docker should get us closer to running on windows, and
all the things in these folders were artifacts of the
past (read: gibberish). It's in git if we ever need it.

* Merge conflicts

* Fixed a bug

* Hosts are only needed in the docker containers

* print

* Fixed the update test verification

* Fixes a bug with ctrl+c
Mike Smith, 7 years ago
commit b8ce59f271
85 changed files with 3293 additions and 3639 deletions
  1. benchmark.cfg.example (+3 -3)
  2. deployment/vagrant/bootstrap.sh (+0 -5)
  3. toolset/benchmark/benchmarker.py (+397 -497)
  4. toolset/benchmark/fortune_html_parser.py (+151 -131)
  5. toolset/benchmark/framework_test.py (+176 -1002)
  6. toolset/benchmark/test_types/cached_query_type.py (+7 -16)
  7. toolset/benchmark/test_types/db_type.py (+16 -13)
  8. toolset/benchmark/test_types/fortune_type.py (+18 -19)
  9. toolset/benchmark/test_types/framework_test_type.py (+35 -22)
  10. toolset/benchmark/test_types/json_type.py (+7 -12)
  11. toolset/benchmark/test_types/plaintext_type.py (+9 -9)
  12. toolset/benchmark/test_types/query_type.py (+7 -18)
  13. toolset/benchmark/test_types/update_type.py (+9 -13)
  14. toolset/benchmark/test_types/verifications.py (+90 -89)
  15. toolset/benchmark/utils.py (+0 -241)
  16. toolset/continuous/tasks/keep-logs.py (+10 -9)
  17. toolset/continuous/tasks/run-benchmarks.sh (+1 -1)
  18. toolset/continuous/tasks/run-tasks.sh (+1 -1)
  19. toolset/initializer.py (+0 -105)
  20. toolset/run-tests.py (+175 -59)
  21. toolset/scaffolding.py (+0 -348)
  22. toolset/setup/__init__.py (+0 -1)
  23. toolset/setup/docker/base.dockerfile (+2 -0)
  24. toolset/setup/docker/databases/mongodb/create.js (+0 -0)
  25. toolset/setup/docker/databases/mongodb/mongodb.dockerfile (+0 -0)
  26. toolset/setup/docker/databases/mysql/create.sql (+0 -0)
  27. toolset/setup/docker/databases/mysql/my.cnf (+0 -0)
  28. toolset/setup/docker/databases/mysql/mysql.dockerfile (+0 -0)
  29. toolset/setup/docker/databases/mysql/mysql.list (+0 -0)
  30. toolset/setup/docker/databases/postgres/60-postgresql-shm.conf (+0 -0)
  31. toolset/setup/docker/databases/postgres/create-postgres-database.sql (+0 -0)
  32. toolset/setup/docker/databases/postgres/create-postgres.sql (+0 -0)
  33. toolset/setup/docker/databases/postgres/pg_hba.conf (+0 -0)
  34. toolset/setup/docker/databases/postgres/postgres.dockerfile (+0 -0)
  35. toolset/setup/docker/databases/postgres/postgresql.conf (+0 -0)
  36. toolset/setup/docker/languages/d-lang.dockerfile (+0 -0)
  37. toolset/setup/docker/languages/dart-lang.dockerfile (+0 -0)
  38. toolset/setup/docker/languages/elixir.dockerfile (+0 -0)
  39. toolset/setup/docker/languages/erlang.dockerfile (+0 -0)
  40. toolset/setup/docker/languages/haskell.dockerfile (+0 -0)
  41. toolset/setup/docker/languages/java.dockerfile (+0 -0)
  42. toolset/setup/docker/languages/java8.dockerfile (+0 -0)
  43. toolset/setup/docker/languages/lua.dockerfile (+0 -0)
  44. toolset/setup/docker/languages/nim.dockerfile (+0 -0)
  45. toolset/setup/docker/languages/ruby-2.4.dockerfile (+0 -0)
  46. toolset/setup/docker/languages/rust.dockerfile (+0 -0)
  47. toolset/setup/docker/systools/ant.dockerfile (+0 -0)
  48. toolset/setup/docker/systools/gcc-6.dockerfile (+0 -0)
  49. toolset/setup/docker/systools/luarocks.dockerfile (+0 -0)
  50. toolset/setup/docker/systools/maven-java8.dockerfile (+0 -0)
  51. toolset/setup/docker/systools/maven-settings.xml (+0 -0)
  52. toolset/setup/docker/systools/maven.dockerfile (+0 -0)
  53. toolset/setup/docker/systools/nimble.dockerfile (+0 -0)
  54. toolset/setup/docker/systools/sbt-java8.dockerfile (+0 -0)
  55. toolset/setup/docker/systools/sbt.dockerfile (+0 -0)
  56. toolset/setup/docker/webservers/nginx.dockerfile (+0 -0)
  57. toolset/setup/docker/webservers/nodejs8.dockerfile (+0 -0)
  58. toolset/setup/docker/webservers/openresty-server.dockerfile (+0 -0)
  59. toolset/setup/docker/webservers/resin-java8.dockerfile (+0 -0)
  60. toolset/setup/docker/webservers/resin.dockerfile (+0 -0)
  61. toolset/setup/linux/__init__.py (+0 -1)
  62. toolset/setup/linux/database.sh (+1 -0)
  63. toolset/setup/linux/prerequisites.sh (+3 -21)
  64. toolset/setup/linux/unbuffered.py (+0 -13)
  65. toolset/setup/sqlserver/setup-sqlserver-bootstrap.ps1 (+0 -25)
  66. toolset/setup/sqlserver/setup-sqlserver.ps1 (+0 -91)
  67. toolset/setup/windows/databases/sqlserver/create-sqlserver-login-and-database.sql (+0 -28)
  68. toolset/setup/windows/databases/sqlserver/create-sqlserver.sql (+0 -55)
  69. toolset/setup/windows/installer-bootstrap.ps1 (+0 -42)
  70. toolset/setup/windows/installer.ps1 (+0 -352)
  71. toolset/setup/windows/installer.ps1~ (+0 -340)
  72. toolset/travis/travis_diff.py (+35 -13)
  73. toolset/travis/travis_setup.sh (+0 -4)
  74. toolset/utils/__init__.py (+0 -0)
  75. toolset/utils/benchmark_config.py (+86 -0)
  76. toolset/utils/cleaner.py (+15 -0)
  77. toolset/utils/docker_helper.py (+201 -0)
  78. toolset/utils/initializer.py (+129 -0)
  79. toolset/utils/metadata_helper.py (+418 -0)
  80. toolset/utils/output_helper.py (+65 -0)
  81. toolset/utils/remote_script_helper.py (+211 -0)
  82. toolset/utils/results_helper.py (+596 -0)
  83. toolset/utils/scaffolding.py (+356 -0)
  84. toolset/utils/setup_util.py (+47 -40)
  85. toolset/utils/unbuffered.py (+16 -0)

+ 3 - 3
benchmark.cfg.example

@@ -1,11 +1,11 @@
 [Defaults]
 # Available Keys:
 os=linux
-server_host=TFB-server
-client_host=TFB-client
+server_host=127.0.0.1
+client_host=127.0.0.1
 client_identity_file=/home/techempower/.ssh/id_rsa
 client_user=techempower
-database_host=TFB-database
+database_host=127.0.0.1
 database_identity_file=/home/techempower/.ssh/id_rsa
 database_os=linux
 database_user=techempower

+ 0 - 5
deployment/vagrant/bootstrap.sh

@@ -22,11 +22,6 @@ if [ ! -e "~/.firstboot" ]; then
   chmod 600 ~/.ssh/config
   echo "vagrant ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers
 
-  # Setting up hosts aliases
-  echo 127.0.0.1 TFB-database | sudo tee --append /etc/hosts
-  echo 127.0.0.1 TFB-client   | sudo tee --append /etc/hosts
-  echo 127.0.0.1 TFB-server   | sudo tee --append /etc/hosts
-
   # Setting up new FWROOT
   export FWROOT="/home/vagrant/FrameworkBenchmarks"
   echo `export FWROOT="/home/vagrant/FrameworkBenchmarks"` >> ~/.bashrc

File diff suppressed because it is too large
+ 397 - 497
toolset/benchmark/benchmarker.py


+ 151 - 131
toolset/benchmark/fortune_html_parser.py

@@ -1,12 +1,12 @@
 # -*- coding: utf-8
-import re
 from HTMLParser import HTMLParser
 from difflib import unified_diff
 
+
 class FortuneHTMLParser(HTMLParser):
-  body = []
+    body = []
 
-  valid_fortune = '''<!doctype html><html>
+    valid_fortune = '''<!doctype html><html>
 <head><title>Fortunes</title></head>
 <body><table>
 <tr><th>id</th><th>message</th></tr>
@@ -25,140 +25,160 @@ class FortuneHTMLParser(HTMLParser):
 <tr><td>12</td><td>フレームワークのベンチマーク</td></tr>
 </table></body></html>'''
 
-  # Is called when a doctype or other such tag is read in.
-  # For our purposes, we assume this is only going to be
-  # "DOCTYPE html", so we will surround it with "<!" and ">".
-  def handle_decl(self, decl):
-    # The spec says that for HTML this is case insensitive,
-    # and since we did not specify xml compliance (where
-    # incorrect casing would throw a syntax error), we must
-    # allow all casings. We will lower for our normalization.
-    self.body.append("<!{d}>".format(d=decl.lower()))
+    def handle_decl(self, decl):
+        '''
+        Is called when a doctype or other such tag is read in.
+        For our purposes, we assume this is only going to be
+        "DOCTYPE html", so we will surround it with "<!" and ">".
+        '''
+        # The spec says that for HTML this is case insensitive,
+        # and since we did not specify xml compliance (where
+        # incorrect casing would throw a syntax error), we must
+        # allow all casings. We will lower for our normalization.
+        self.body.append("<!{d}>".format(d=decl.lower()))
 
-  # This is called when an HTML character is parsed (i.e.
-  # &quot;). There are a number of issues to be resolved
-  # here. For instance, some tests choose to leave the
-  # "+" character as-is, which should be fine as far as
-  # character escaping goes, but others choose to use the
-  # character reference of "&#43;", which is also fine.
-  # Therefore, this method looks for all possible character
-  # references and normalizes them so that we can
-  # validate the input against a single valid spec string.
-  # Another example problem: "&quot;" is valid, but so is
-  # "&#34;"
-  def handle_charref(self, name):
-    val = name.lower()
-    # "&#34;" is a valid escaping, but we are normalizing
-    # it so that our final parse can just be checked for
-    # equality.
-    if val == "34" or val == "034" or val == "x22":
-      # Append our normalized entity reference to our body.
-      self.body.append("&quot;")
-    # "&#39;" is a valid escaping of "-", but it is not
-    # required, so we normalize for equality checking.
-    if val == "39" or val == "039" or val == "x27":
-      self.body.append("&apos;")
-    # Again, "&#43;" is a valid escaping of the "+", but
-    # it is not required, so we need to normalize for out
-    # final parse and equality check.
-    if val == "43" or val == "043" or val == "x2b":
-      self.body.append("+")
-    # Again, "&#62;" is a valid escaping of ">", but we
-    # need to normalize to "&gt;" for equality checking.
-    if val == "62" or val == "062" or val == "x3e":
-      self.body.append("&gt;")
-    # Again, "&#60;" is a valid escaping of "<", but we
-    # need to normalize to "&lt;" for equality checking.
-    if val == "60" or val == "060" or val == "x3c":
-      self.body.append("&lt;")
-    # Not sure why some are escaping '/'
-    if val == "47" or val == "047" or val == "x2f":
-      self.body.append("/")
-    # "&#40;" is a valid escaping of "(", but
-    # it is not required, so we need to normalize for out
-    # final parse and equality check.
-    if val == "40" or val == "040" or val == "x28":
-      self.body.append("(")
-    # "&#41;" is a valid escaping of ")", but
-    # it is not required, so we need to normalize for out
-    # final parse and equality check.
-    if val == "41" or val == "041" or val == "x29":
-      self.body.append(")")
+    def handle_charref(self, name):
+        '''
+        This is called when an HTML character is parsed (i.e.
+        &quot;). There are a number of issues to be resolved
+        here. For instance, some tests choose to leave the
+        "+" character as-is, which should be fine as far as
+        character escaping goes, but others choose to use the
+        character reference of "&#43;", which is also fine.
+        Therefore, this method looks for all possible character
+        references and normalizes them so that we can
+        validate the input against a single valid spec string.
+        Another example problem: "&quot;" is valid, but so is
+        "&#34;"
+        '''
+        val = name.lower()
+        # "&#34;" is a valid escaping, but we are normalizing
+        # it so that our final parse can just be checked for
+        # equality.
+        if val == "34" or val == "034" or val == "x22":
+            # Append our normalized entity reference to our body.
+            self.body.append("&quot;")
+        # "&#39;" is a valid escaping of "-", but it is not
+        # required, so we normalize for equality checking.
+        if val == "39" or val == "039" or val == "x27":
+            self.body.append("&apos;")
+        # Again, "&#43;" is a valid escaping of the "+", but
+        # it is not required, so we need to normalize for out
+        # final parse and equality check.
+        if val == "43" or val == "043" or val == "x2b":
+            self.body.append("+")
+        # Again, "&#62;" is a valid escaping of ">", but we
+        # need to normalize to "&gt;" for equality checking.
+        if val == "62" or val == "062" or val == "x3e":
+            self.body.append("&gt;")
+        # Again, "&#60;" is a valid escaping of "<", but we
+        # need to normalize to "&lt;" for equality checking.
+        if val == "60" or val == "060" or val == "x3c":
+            self.body.append("&lt;")
+        # Not sure why some are escaping '/'
+        if val == "47" or val == "047" or val == "x2f":
+            self.body.append("/")
+        # "&#40;" is a valid escaping of "(", but
+        # it is not required, so we need to normalize for out
+        # final parse and equality check.
+        if val == "40" or val == "040" or val == "x28":
+            self.body.append("(")
+        # "&#41;" is a valid escaping of ")", but
+        # it is not required, so we need to normalize for out
+        # final parse and equality check.
+        if val == "41" or val == "041" or val == "x29":
+            self.body.append(")")
 
-  def handle_entityref(self, name):
-    # Again, "&mdash;" is a valid escaping of "—", but we
-    # need to normalize to "—" for equality checking.
-    if name == "mdash":
-      self.body.append("—")
-    else:
-      self.body.append("&{n};".format(n=name))
+    def handle_entityref(self, name):
+        '''
+        Again, "&mdash;" is a valid escaping of "—", but we
+        need to normalize to "—" for equality checking.
+        '''
+        if name == "mdash":
+            self.body.append("—")
+        else:
+            self.body.append("&{n};".format(n=name))
 
-  # This is called every time a tag is opened. We append
-  # each one wrapped in "<" and ">".
-  def handle_starttag(self, tag, attrs):
-    self.body.append("<{t}>".format(t=tag))
+    def handle_starttag(self, tag, attrs):
+        '''
+        This is called every time a tag is opened. We append
+        each one wrapped in "<" and ">".
+        '''
+        self.body.append("<{t}>".format(t=tag))
 
-    # Append a newline after the <table> and <html>
-    if tag.lower() == 'table' or tag.lower() == 'html':
-      self.body.append("\n")
+        # Append a newline after the <table> and <html>
+        if tag.lower() == 'table' or tag.lower() == 'html':
+            self.body.append("\n")
 
-  # This is called whenever data is presented inside of a
-  # start and end tag. Generally, this will only ever be
-  # the contents inside of "<td>" and "</td>", but there
-  # are also the "<title>" and "</title>" tags.
-  def handle_data (self, data):
-    if data.strip() != '':
-      # After a LOT of debate, these are now considered
-      # valid in data. The reason for this approach is
-      # because a few tests use tools which determine
-      # at compile time whether or not a string needs
-      # a given type of html escaping, and our fortune
-      # test has apostrophes and quotes in html data
-      # rather than as an html attribute etc.
-      # example:
-      # <td>A computer scientist is someone who fixes things that aren't broken.</td>
-      # Semanticly, that apostrophe does not NEED to
-      # be escaped. The same is currently true for our
-      # quotes.
-      # In fact, in data (read: between two html tags)
-      # even the '>' need not be replaced as long as
-      # the '<' are all escaped.
-      # We replace them with their escapings here in
-      # order to have a noramlized string for equality
-      # comparison at the end.
-      data = data.replace('\'', '&apos;')
-      data = data.replace('"', '&quot;')
-      data = data.replace('>', '&gt;')
+    def handle_data(self, data):
+        '''
+        This is called whenever data is presented inside of a
+        start and end tag. Generally, this will only ever be
+        the contents inside of "<td>" and "</td>", but there
+        are also the "<title>" and "</title>" tags.
+        '''
+        if data.strip() != '':
+            # After a LOT of debate, these are now considered
+            # valid in data. The reason for this approach is
+            # because a few tests use tools which determine
+            # at compile time whether or not a string needs
+            # a given type of html escaping, and our fortune
+            # test has apostrophes and quotes in html data
+            # rather than as an html attribute etc.
+            # example:
+            # <td>A computer scientist is someone who fixes things that aren't broken.</td>
+            # Semanticly, that apostrophe does not NEED to
+            # be escaped. The same is currently true for our
+            # quotes.
+            # In fact, in data (read: between two html tags)
+            # even the '>' need not be replaced as long as
+            # the '<' are all escaped.
+            # We replace them with their escapings here in
+            # order to have a noramlized string for equality
+            # comparison at the end.
+            data = data.replace('\'', '&apos;')
+            data = data.replace('"', '&quot;')
+            data = data.replace('>', '&gt;')
 
-      self.body.append("{d}".format(d=data))
+            self.body.append("{d}".format(d=data))
 
-  # This is called every time a tag is closed. We append
-  # each one wrapped in "</" and ">".
-  def handle_endtag(self, tag):
-    self.body.append("</{t}>".format(t=tag))
+    def handle_endtag(self, tag):
+        '''
+        This is called every time a tag is closed. We append
+        each one wrapped in "</" and ">".
+        '''
+        self.body.append("</{t}>".format(t=tag))
 
-    # Append a newline after each </tr> and </head>
-    if tag.lower() == 'tr' or tag.lower() == 'head':
-      self.body.append("\n")
+        # Append a newline after each </tr> and </head>
+        if tag.lower() == 'tr' or tag.lower() == 'head':
+            self.body.append("\n")
 
-  # Returns whether the HTML input parsed by this parser
-  # is valid against our known "fortune" spec.
-  # The parsed data in 'body' is joined on empty strings
-  # and checked for equality against our spec.
-  def isValidFortune(self, out):
-    body = ''.join(self.body)
-    same = self.valid_fortune == body
-    diff_lines = []
-    if not same:
-      output = "Oh no! I compared {!s}\n\n\nto.....{!s}\n".format(self.valid_fortune, body)
-      output += "Fortune invalid. Diff following:\n"
-      headers_left = 3
-      for line in unified_diff(self.valid_fortune.split('\n'), body.split('\n'), fromfile='Valid', tofile='Response', n=0):
-        diff_lines.append(line)
-        output += line
-        headers_left -= 1
-        if headers_left <= 0:
-          output += "\n"
-      print(output)
-      out.write(output)
-    return (same, diff_lines)
+    def isValidFortune(self, out):
+        '''
+        Returns whether the HTML input parsed by this parser
+        is valid against our known "fortune" spec.
+        The parsed data in 'body' is joined on empty strings
+        and checked for equality against our spec.
+        '''
+        body = ''.join(self.body)
+        same = self.valid_fortune == body
+        diff_lines = []
+        if not same:
+            output = "Oh no! I compared {!s}\n\n\nto.....{!s}\n".format(
+                self.valid_fortune, body)
+            output += "Fortune invalid. Diff following:\n"
+            headers_left = 3
+            for line in unified_diff(
+                    self.valid_fortune.split('\n'),
+                    body.split('\n'),
+                    fromfile='Valid',
+                    tofile='Response',
+                    n=0):
+                diff_lines.append(line)
+                output += line
+                headers_left -= 1
+                if headers_left <= 0:
+                    output += "\n"
+            print(output)
+            out.write(output)
+        return (same, diff_lines)
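
A minimal usage sketch of the reformatted parser above. The import path assumes the new toolset.* namespacing introduced by this commit, and the response text is a placeholder rather than real framework output: feed the raw /fortunes HTML into the parser, then check the normalized body against the known-good spec via isValidFortune.

    import sys

    # Assumed import path under the new namespacing; adjust to the actual layout.
    from toolset.benchmark.fortune_html_parser import FortuneHTMLParser

    response_text = "<!doctype html><html>...</html>"  # placeholder /fortunes body

    parser = FortuneHTMLParser()
    parser.feed(response_text)  # HTMLParser.feed drives the handle_* callbacks
    valid, diff_lines = parser.isValidFortune(sys.stdout)
    if not valid:
        print("Fortune verification failed (%d diff lines)" % len(diff_lines))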

+ 176 - 1002
toolset/benchmark/framework_test.py

@@ -1,1017 +1,191 @@
-from benchmark.fortune_html_parser import FortuneHTMLParser
-from setup.linux import setup_util
-from benchmark.test_types import *
-
-import importlib
 import os
 import subprocess
-import socket
-import time
-import re
-from pprint import pprint
 import sys
 import traceback
-import json
 import logging
-import csv
-import shlex
-import math
-import multiprocessing
-import docker
-from collections import OrderedDict
 from requests import ConnectionError
-from threading import Thread
-from threading import Event
 
-from utils import header
-from utils import gather_docker_dependencies
-from utils import find_docker_file
+from toolset.utils.output_helper import header
+from toolset.utils import docker_helper
 
 # Cross-platform colored text
-from colorama import Fore, Back, Style
-from datetime import datetime
-from datetime import timedelta
-
-class FrameworkTest:
-  headers_template = "-H 'Host: {server_host}' -H 'Accept: {accept}' -H 'Connection: keep-alive'"
-
-  # Used for test types that require no pipelining or query string params.
-  concurrency_template = """
-
-    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Primer {name}"
-    echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
-    echo "---------------------------------------------------------"
-    echo ""
-    {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Warmup {name}"
-    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
-    echo "---------------------------------------------------------"
-    echo ""
-    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Synchronizing time"
-    echo "---------------------------------------------------------"
-    echo ""
-    ntpdate -s pool.ntp.org
-
-    for c in {levels}
-    do
-      echo ""
-      echo "---------------------------------------------------------"
-      echo " Concurrency: $c for {name}"
-      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\""
-      echo "---------------------------------------------------------"
-      echo ""
-      STARTTIME=$(date +"%s")
-      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url}
-      echo "STARTTIME $STARTTIME"
-      echo "ENDTIME $(date +"%s")"
-      sleep 2
-    done
-  """
-  # Used for test types that require pipelining.
-  pipeline_template = """
-
-    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Primer {name}"
-    echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
-    echo "---------------------------------------------------------"
-    echo ""
-    {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Warmup {name}"
-    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
-    echo "---------------------------------------------------------"
-    echo ""
-    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Synchronizing time"
-    echo "---------------------------------------------------------"
-    echo ""
-    ntpdate -s pool.ntp.org
-
-    for c in {levels}
-    do
-      echo ""
-      echo "---------------------------------------------------------"
-      echo " Concurrency: $c for {name}"
-      echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
-      echo "---------------------------------------------------------"
-      echo ""
-      STARTTIME=$(date +"%s")
-      {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
-      echo "STARTTIME $STARTTIME"
-      echo "ENDTIME $(date +"%s")"
-      sleep 2
-    done
-  """
-  # Used for test types that require a database -
-  # These tests run at a static concurrency level and vary the size of
-  # the query sent with each request
-  query_template = """
-    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Primer {name}"
-    echo " wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
-    echo "---------------------------------------------------------"
-    echo ""
-    wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Running Warmup {name}"
-    echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}2\""
-    echo "---------------------------------------------------------"
-    echo ""
-    wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}2"
-    sleep 5
-
-    echo ""
-    echo "---------------------------------------------------------"
-    echo " Synchronizing time"
-    echo "---------------------------------------------------------"
-    echo ""
-    ntpdate -s pool.ntp.org
-
-    for c in {levels}
-    do
-      echo ""
-      echo "---------------------------------------------------------"
-      echo " Queries: $c for {name}"
-      echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}$c\""
-      echo "---------------------------------------------------------"
-      echo ""
-      STARTTIME=$(date +"%s")
-      wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}$c"
-      echo "STARTTIME $STARTTIME"
-      echo "ENDTIME $(date +"%s")"
-      sleep 2
-    done
-  """
-
-  ############################################################
-  # start(benchmarker)
-  # Start the test using its setup file
-  ############################################################
-  def start(self, out):
-
-    # Setup environment variables
-    logDir = os.path.join(self.fwroot, self.benchmarker.full_results_directory(), 'logs', self.name.lower())
-
-    def tee_output(prefix, line):
-      # Needs to be one atomic write
-      # Explicitly use UTF-8 as it's the most common framework output
-      # TODO improve encoding handling
-      line = prefix.encode('utf-8') + line
-
-      # Log to current terminal
-      sys.stdout.write(line)
-      sys.stdout.flush()
-
-      out.write(line)
-      out.flush()
-
-    prefix = "Setup %s: " % self.name
-
-    ###########################
-    # Build the Docker images
-    ##########################
-
-    # Build the test docker file based on the test name
-    # then build any additional docker files specified in the benchmark_config
-    # Note - If you want to be able to stream the output of the build process you have
-    # to use the low level API:
-    #  https://docker-py.readthedocs.io/en/stable/api.html#module-docker.api.build
-
-    prev_line = os.linesep
-    def handle_build_output(line):
-      if line.startswith('{"stream":'):
-        line = json.loads(line)
-        line = line[line.keys()[0]].encode('utf-8')
-        if prev_line.endswith(os.linesep):
-          tee_output(prefix, line)
-        else:
-          tee_output(line)
-        self.prev_line = line
-
-    docker_buildargs = { 'CPU_COUNT': str(multiprocessing.cpu_count()),
-                         'MAX_CONCURRENCY': str(max(self.benchmarker.concurrency_levels)) }
-
-    test_docker_files = ["%s.dockerfile" % self.name]
-    if self.docker_files is not None:
-      if type(self.docker_files) is list:
-        test_docker_files.extend(self.docker_files)
-      else:
-        raise Exception("docker_files in benchmark_config.json must be an array")
-
-    for test_docker_file in test_docker_files:
-      deps = list(reversed(gather_docker_dependencies(os.path.join(self.directory, test_docker_file))))
-
-      docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup", "linux", "docker")
-
-      for dependency in deps:
-        docker_file = os.path.join(self.directory, dependency + ".dockerfile")
-        if not docker_file or not os.path.exists(docker_file):
-          docker_file = find_docker_file(docker_dir, dependency + ".dockerfile")
-        if not docker_file:
-          tee_output(prefix, "Docker build failed; %s could not be found; terminating\n" % (dependency + ".dockerfile"))
-          return 1
-
-        # Build the dependency image
-        try:
-          for line in docker.APIClient(base_url='unix://var/run/docker.sock').build(
-            path=os.path.dirname(docker_file),
-            dockerfile="%s.dockerfile" % dependency,
-            tag="tfb/%s" % dependency,
-            buildargs=docker_buildargs,
-            forcerm=True
-          ):
-            handle_build_output(line)
-        except Exception as e:
-          tee_output(prefix, "Docker dependency build failed; terminating\n")
-          print(e)
-          return 1
+from colorama import Fore, Style
 
-    # Build the test images
-    for test_docker_file in test_docker_files:
-        try:
-          for line in docker.APIClient(base_url='unix://var/run/docker.sock').build(
-            path=self.directory,
-            dockerfile=test_docker_file,
-            tag="tfb/test/%s" % test_docker_file.replace(".dockerfile",""),
-            buildargs=docker_buildargs,
-            forcerm=True
-          ):
-            handle_build_output(line)
-        except Exception as e:
-          tee_output(prefix, "Docker build failed; terminating\n")
-          print(e)
-          return 1
 
-
-    ##########################
-    # Run the Docker container
-    ##########################
-
-	client = docker.from_env()
-
-    for test_docker_file in test_docker_files:
-      try:
-        def watch_container(container, prefix):
-          for line in container.logs(stream=True):
-            tee_output(prefix, line)
-
-        container = client.containers.run(
-          "tfb/test/%s" % test_docker_file.replace(".dockerfile", ""),
-          network_mode="host",
-          privileged=True,
-          stderr=True,
-          detach=True)
-
-        prefix = "Server %s: " % self.name
-        watch_thread = Thread(target = watch_container, args=(container,prefix))
-        watch_thread.daemon = True
-        watch_thread.start()
-
-      except Exception as e:
-        tee_output(prefix, "Running docker cointainer: %s failed" % test_docker_file)
-        print(e)
-        return 1
-
-    return 0
-  ############################################################
-  # End start
-  ############################################################
-
-  ############################################################
-  # verify_urls
-  # Verifys each of the URLs for this test. THis will sinply
-  # curl the URL and check for it's return status.
-  # For each url, a flag will be set on this object for whether
-  # or not it passed
-  # Returns True if all verifications succeeded
-  ############################################################
-  def verify_urls(self, logPath):
-    result = True
-
-    def verify_type(test_type):
-      verificationPath = os.path.join(logPath, test_type)
-      try:
-        os.makedirs(verificationPath)
-      except OSError:
-        pass
-      with open(os.path.join(verificationPath, 'verification.txt'), 'w') as verification:
-        test = self.runTests[test_type]
-        test.setup_out(verification)
-        verification.write(header("VERIFYING %s" % test_type.upper()))
-
-        base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)
-
-        try:
-          # Verifies headers from the server. This check is made from the
-          # App Server using Pythons requests module. Will do a second check from
-          # the client to make sure the server isn't only accepting connections
-          # from localhost on a multi-machine setup.
-          results = test.verify(base_url)
-
-          # Now verify that the url is reachable from the client machine, unless
-          # we're already failing
-          if not any(result == 'fail' for (result, reason, url) in results):
-            p = subprocess.call(["ssh", "TFB-client", "curl -sSf %s" % base_url + test.get_url()], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            if p is not 0:
-              results = [('fail', "Server did not respond to request from client machine.", base_url)]
-              logging.warning("""This error usually means your server is only accepting
+class FrameworkTest:
+    def __init__(self, name, directory, benchmarker_config, results, runTests,
+                 args):
+        '''
+        Constructor
+        '''
+        self.name = name
+        self.directory = directory
+        self.benchmarker_config = benchmarker_config
+        self.results = results
+        self.runTests = runTests
+        self.fwroot = benchmarker_config.fwroot
+        self.approach = ""
+        self.classification = ""
+        self.database = ""
+        self.framework = ""
+        self.language = ""
+        self.orm = ""
+        self.platform = ""
+        self.webserver = ""
+        self.os = ""
+        self.database_os = ""
+        self.display_name = ""
+        self.notes = ""
+        self.port = ""
+        self.versus = ""
+        self.docker_files = None
+
+        # setup logging
+        logging.basicConfig(stream=sys.stderr, level=logging.INFO)
+
+        # Used in setup.sh scripts for consistency with
+        # the bash environment variables
+        self.troot = self.directory
+
+        self.__dict__.update(args)
+
+    ##########################################################################################
+    # Public Methods
+    ##########################################################################################
+
+    def start(self, out):
+        '''
+        Start the test implementation
+        '''
+        test_docker_files = ["%s.dockerfile" % self.name]
+        if self.docker_files is not None:
+            if type(self.docker_files) is list:
+                test_docker_files.extend(self.docker_files)
+            else:
+                raise Exception(
+                    "docker_files in benchmark_config.json must be an array")
+
+        docker_helper.build(self.benchmarker_config, [self.name], out)
+
+        docker_helper.run(self.benchmarker_config, test_docker_files, out)
+
+        return 0
+
+    def verify_urls(self, logPath):
+        '''
+        Verifys each of the URLs for this test. THis will sinply curl the URL and 
+        check for it's return status. For each url, a flag will be set on this 
+        object for whether or not it passed.
+        Returns True if all verifications succeeded
+        '''
+        result = True
+
+        def verify_type(test_type):
+            verificationPath = os.path.join(logPath, test_type)
+            try:
+                os.makedirs(verificationPath)
+            except OSError:
+                pass
+            with open(os.path.join(verificationPath, 'verification.txt'),
+                      'w') as verification:
+                test = self.runTests[test_type]
+                test.setup_out(verification)
+                verification.write(header("VERIFYING %s" % test_type.upper()))
+
+                base_url = "http://%s:%s" % (
+                    self.benchmarker_config.server_host, self.port)
+
+                try:
+                    # Verifies headers from the server. This check is made from the
+                    # App Server using Pythons requests module. Will do a second check from
+                    # the client to make sure the server isn't only accepting connections
+                    # from localhost on a multi-machine setup.
+                    results = test.verify(base_url)
+
+                    # Now verify that the url is reachable from the client machine, unless
+                    # we're already failing
+                    if not any(result == 'fail'
+                               for (result, reason, url) in results):
+                        p = subprocess.call(
+                            [
+                                "ssh", self.benchmarker_config.client_host,
+                                "curl -sSf %s" % base_url + test.get_url()
+                            ],
+                            shell=False,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+                        if p is not 0:
+                            results = [(
+                                'fail',
+                                "Server did not respond to request from client machine.",
+                                base_url)]
+                            logging.warning(
+                                """This error usually means your server is only accepting
                 requests from localhost.""")
-        except ConnectionError as e:
-          results = [('fail',"Server did not respond to request", base_url)]
-          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
-        except Exception as e:
-          results = [('fail',"""Caused Exception in TFB
+                except ConnectionError as e:
+                    results = [('fail', "Server did not respond to request",
+                                base_url)]
+                    logging.warning(
+                        "Verifying test %s for %s caused an exception: %s",
+                        test_type, self.name, e)
+                except Exception as e:
+                    results = [('fail', """Caused Exception in TFB
             This almost certainly means your return value is incorrect,
             but also that you have found a bug. Please submit an issue
             including this message: %s\n%s""" % (e, traceback.format_exc()),
-            base_url)]
-          logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
-          traceback.format_exc()
-
-        test.failed = any(result == 'fail' for (result, reason, url) in results)
-        test.warned = any(result == 'warn' for (result, reason, url) in results)
-        test.passed = all(result == 'pass' for (result, reason, url) in results)
-
-        def output_result(result, reason, url):
-          specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
-          color = Fore.GREEN
-          if result.upper() == "WARN":
-            color = Fore.YELLOW
-          elif result.upper() == "FAIL":
-            color = Fore.RED
-
-          verification.write(("   " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
-          print("   {!s}{!s}{!s} for {!s}\n".format(color, result.upper(), Style.RESET_ALL, url))
-          if reason is not None and len(reason) != 0:
-            for line in reason.splitlines():
-              verification.write("     " + line + '\n')
-              print("     " + line)
-            if not test.passed:
-              verification.write("     See %s\n" % specific_rules_url)
-              print("     See {!s}\n".format(specific_rules_url))
-
-        [output_result(r1,r2,url) for (r1, r2, url) in results]
-
-        if test.failed:
-          self.benchmarker.report_verify_results(self, test_type, 'fail')
-        elif test.warned:
-          self.benchmarker.report_verify_results(self, test_type, 'warn')
-        elif test.passed:
-          self.benchmarker.report_verify_results(self, test_type, 'pass')
-        else:
-          raise Exception("Unknown error - test did not pass,warn,or fail")
-
-        verification.flush()
-
-    result = True
-    for test_type in self.runTests:
-      verify_type(test_type)
-      if self.runTests[test_type].failed:
-        result = False
-
-    return result
-  ############################################################
-  # End verify_urls
-  ############################################################
-
-  ############################################################
-  # benchmark
-  # Runs the benchmark for each type of test that it implements
-  # JSON/DB/Query.
-  ############################################################
-  def benchmark(self, logPath):
-
-    def benchmark_type(test_type):
-      benchmarkPath = os.path.join(logPath, test_type)
-      try:
-        os.makedirs(benchmarkPath)
-      except OSError:
-        pass
-      with open(os.path.join(benchmarkPath, 'benchmark.txt'), 'w') as out:
-        out.write("BENCHMARKING %s ... " % test_type.upper())
-
-        test = self.runTests[test_type]
-        test.setup_out(out)
-        output_file = self.benchmarker.output_file(self.name, test_type)
-        if not os.path.exists(output_file):
-          # Open to create the empty file
-          with open(output_file, 'w'):
-            pass
-
-        if not test.failed:
-          if test_type == 'plaintext': # One special case
-            remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
-          elif test_type == 'query' or test_type == 'update':
-            remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header, self.benchmarker.query_levels)
-          elif test_type == 'cached_query':
-            remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header, self.benchmarker.cached_query_levels)
-          else:
-            remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)
-
-          # Begin resource usage metrics collection
-          self.__begin_logging(test_type)
-
-          # Run the benchmark
-          with open(output_file, 'w') as raw_file:
-            p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
-            p.communicate(remote_script)
-            out.flush()
-
-          # End resource usage metrics collection
-          self.__end_logging()
-
-        results = self.__parse_test(test_type)
-        print("Benchmark results:")
-        pprint(results)
-
-        self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
-        out.write( "Complete\n" )
-        out.flush()
-
-    for test_type in self.runTests:
-      benchmark_type(test_type)
-  ############################################################
-  # End benchmark
-  ############################################################
-
-  ############################################################
-  # parse_all
-  # Method meant to be run for a given timestamp
-  ############################################################
-  def parse_all(self):
-    for test_type in self.runTests:
-      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
-        results = self.__parse_test(test_type)
-        self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
-
-  ##########################################################################################
-  # Private Methods
-  ##########################################################################################
-
-  ############################################################
-  # __parse_test(test_type)
-  ############################################################
-  def __parse_test(self, test_type):
-    try:
-      results = dict()
-      results['results'] = []
-      stats = []
-
-      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
-        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
-          is_warmup = True
-          rawData = None
-          for line in raw_data:
-
-            if "Queries:" in line or "Concurrency:" in line:
-              is_warmup = False
-              rawData = None
-              continue
-            if "Warmup" in line or "Primer" in line:
-              is_warmup = True
-              continue
-
-            if not is_warmup:
-              if rawData == None:
-                rawData = dict()
-                results['results'].append(rawData)
-
-              #if "Requests/sec:" in line:
-              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
-              #  rawData['reportedResults'] = m.group(1)
-
-              # search for weighttp data such as succeeded and failed.
-              if "Latency" in line:
-                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
-                if len(m) == 4:
-                  rawData['latencyAvg'] = m[0]
-                  rawData['latencyStdev'] = m[1]
-                  rawData['latencyMax'] = m[2]
-              #    rawData['latencyStdevPercent'] = m[3]
-
-              #if "Req/Sec" in line:
-              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
-              #  if len(m) == 4:
-              #    rawData['requestsAvg'] = m[0]
-              #    rawData['requestsStdev'] = m[1]
-              #    rawData['requestsMax'] = m[2]
-              #    rawData['requestsStdevPercent'] = m[3]
-
-              #if "requests in" in line:
-              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
-              #  if m != None:
-              #    # parse out the raw time, which may be in minutes or seconds
-              #    raw_time = m.group(1)
-              #    if "ms" in raw_time:
-              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
-              #    elif "s" in raw_time:
-              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
-              #    elif "m" in raw_time:
-              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
-              #    elif "h" in raw_time:
-              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
-
-              if "requests in" in line:
-                m = re.search("([0-9]+) requests in", line)
-                if m != None:
-                  rawData['totalRequests'] = int(m.group(1))
-
-              if "Socket errors" in line:
-                if "connect" in line:
-                  m = re.search("connect ([0-9]+)", line)
-                  rawData['connect'] = int(m.group(1))
-                if "read" in line:
-                  m = re.search("read ([0-9]+)", line)
-                  rawData['read'] = int(m.group(1))
-                if "write" in line:
-                  m = re.search("write ([0-9]+)", line)
-                  rawData['write'] = int(m.group(1))
-                if "timeout" in line:
-                  m = re.search("timeout ([0-9]+)", line)
-                  rawData['timeout'] = int(m.group(1))
-
-              if "Non-2xx" in line:
-                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
-                if m != None:
-                  rawData['5xx'] = int(m.group(1))
-              if "STARTTIME" in line:
-                m = re.search("[0-9]+", line)
-                rawData["startTime"] = int(m.group(0))
-              if "ENDTIME" in line:
-                m = re.search("[0-9]+", line)
-                rawData["endTime"] = int(m.group(0))
-                test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
-                # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
-                stats.append(test_stats)
-      with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
-        json.dump(stats, stats_file, indent=2)
-
-
-      return results
-    except IOError:
-      return None
-  ############################################################
-  # End benchmark
-  ############################################################
-
-  ############################################################
-  # __generate_concurrency_script(url, port)
-  # Generates the string containing the bash script that will
-  # be run on the client to benchmark a single test. This
-  # specifically works for the variable concurrency tests (JSON
-  # and DB)
-  ############################################################
-  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
-    headers = self.headers_template.format(server_host=self.benchmarker.server_host, accept=accept_header)
-    return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
-      name=self.name, duration=self.benchmarker.duration,
-      levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
-      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)
-
-  ############################################################
-  # __generate_pipeline_script(url, port)
-  # Generates the string containing the bash script that will
-  # be run on the client to benchmark a single pipeline test.
-  ############################################################
-  def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
-    headers = self.headers_template.format(server_host=self.benchmarker.server_host, accept=accept_header)
-    return self.pipeline_template.format(max_concurrency=max(self.benchmarker.pipeline_concurrency_levels),
-      name=self.name, duration=self.benchmarker.duration,
-      levels=" ".join("{}".format(item) for item in self.benchmarker.pipeline_concurrency_levels),
-      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
-      pipeline=16)
-
-  ############################################################
-  # __generate_query_script(url, port)
-  # Generates the string containing the bash script that will
-  # be run on the client to benchmark a single test. This
-  # specifically works for the variable query tests (Query)
-  ############################################################
-  def __generate_query_script(self, url, port, accept_header, query_levels):
-    headers = self.headers_template.format(server_host=self.benchmarker.server_host, accept=accept_header)
-    return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
-      name=self.name, duration=self.benchmarker.duration,
-      levels=" ".join("{}".format(item) for item in query_levels),
-      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
-
-  ############################################################
-  # Returns True if any test type this this framework test will use a DB
-  ############################################################
-  def requires_database(self):
-    '''Returns True/False if this test requires a database'''
-    return any(tobj.requires_db for (ttype,tobj) in self.runTests.iteritems())
-
-  ############################################################
-  # __begin_logging
-  # Starts a thread to monitor the resource usage, to be synced with the client's time
-  # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
-  ############################################################
-  def __begin_logging(self, test_type):
-    output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_type))
-    dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
-                                      --raw --socket --tcp --udp --unix --vm --disk-util \
-                                      --rpc --rpcd --output {output_file}".format(output_file=output_file)
-    cmd = shlex.split(dstat_string)
-    dev_null = open(os.devnull, "w")
-    self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null, stderr=subprocess.STDOUT)
-
-  ##############################################################
-  # Begin __end_logging
-  # Stops the logger thread and blocks until shutdown is complete.
-  ##############################################################
-  def __end_logging(self):
-    self.subprocess_handle.terminate()
-    self.subprocess_handle.communicate()
-
-  ##############################################################
-  # Begin __parse_stats
-  # For each test type, process all the statistics, and return a multi-layered dictionary
-  # that has a structure as follows:
-  # (timestamp)
-  # | (main header) - group that the stat is in
-  # | | (sub header) - title of the stat
-  # | | | (stat) - the stat itself, usually a floating point number
-  ##############################################################
-  def __parse_stats(self, test_type, start_time, end_time, interval):
-    stats_dict = dict()
-    stats_file = self.benchmarker.stats_file(self.name, test_type)
-    with open(stats_file) as stats:
-      while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header
-        pass
-      stats_reader = csv.reader(stats)
-      main_header = stats_reader.next()
-      sub_header = stats_reader.next()
-      time_row = sub_header.index("epoch")
-      int_counter = 0
-      for row in stats_reader:
-        time = float(row[time_row])
-        int_counter+=1
-        if time < start_time:
-          continue
-        elif time > end_time:
-          return stats_dict
-        if int_counter % interval != 0:
-          continue
-        row_dict = dict()
-        for nextheader in main_header:
-          if nextheader != "":
-            row_dict[nextheader] = dict()
-        header = ""
-        for item_num, column in enumerate(row):
-          if(len(main_header[item_num]) != 0):
-            header = main_header[item_num]
-          row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json
-        stats_dict[time] = row_dict
-    return stats_dict
-  ##############################################################
-  # End __parse_stats
-  ##############################################################
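For reference, the dictionary that __parse_stats returns is keyed by epoch timestamp, then by dstat group header, then by column name; a minimal sketch of the shape, with made-up numbers:

    stats = {
        1474923445.0: {
            'total cpu usage': {'usr': 2.0, 'sys': 1.0, 'idl': 97.0},
            'memory usage': {'used': 1848377344.0, 'free': 6291456000.0},
            'net/total': {'recv': 1024.0, 'send': 2048.0},
            'dsk/total': {'read': 0.0, 'writ': 4096.0}
        }
    }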
-
-  def __getattr__(self, name):
-    """For backwards compatibility, we used to pass benchmarker
-    as the argument to the setup.sh files"""
-    try:
-      x = getattr(self.benchmarker, name)
-    except AttributeError:
-      print("AttributeError: {!s} not a member of FrameworkTest or Benchmarker".format(name))
-      print("This is probably a bug")
-      raise
-    return x
-
-  ##############################################################
-  # Begin __calculate_average_stats
-  # We have a large amount of raw data for the statistics that
-  # may be useful for the stats nerds, but most people care about
-  # a couple of numbers. For now, we're only going to supply:
-  # * Average CPU
-  # * Average Memory
-  # * Total network use
-  # * Total disk use
-  # More may be added in the future. If they are, please update
-  # the above list.
-  # Note: raw_stats is directly from the __parse_stats method.
-  # Recall that this consists of a dictionary of timestamps,
-  # each of which contain a dictionary of stat categories which
-  # contain a dictionary of stats
-  ##############################################################
-  def __calculate_average_stats(self, raw_stats):
-    raw_stat_collection = dict()
-
-    for timestamp, time_dict in raw_stats.items():
-      for main_header, sub_headers in time_dict.items():
-        item_to_append = None
-        if 'cpu' in main_header:
-          # We want to take the idl stat and subtract it from 100
-          # to get the time that the CPU is NOT idle.
-          item_to_append = 100.0 - sub_headers['idl']
-        elif main_header == 'memory usage':
-          item_to_append = sub_headers['used']
-        elif 'net' in main_header:
-          # Network stats have two parts - receive and send. We'll use a tuple of
-          # style (receive, send)
-          item_to_append = (sub_headers['recv'], sub_headers['send'])
-        elif 'dsk' in main_header or 'io' in main_header:
-          # Similar to the network stats, except our tuple looks like (read, write)
-          item_to_append = (sub_headers['read'], sub_headers['writ'])
-        if item_to_append is not None:
-          if main_header not in raw_stat_collection:
-            raw_stat_collection[main_header] = list()
-          raw_stat_collection[main_header].append(item_to_append)
-
-    # Simple function to determine human readable size
-    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
-    def sizeof_fmt(num):
-      # We'll assume that any number we get is convertible to a float, just in case
-      num = float(num)
-      for x in ['bytes','KB','MB','GB']:
-        if num < 1024.0 and num > -1024.0:
-          return "%3.1f%s" % (num, x)
-        num /= 1024.0
-      return "%3.1f%s" % (num, 'TB')
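For example, treating the input as a byte count:

    sizeof_fmt(512)        # '512.0bytes'
    sizeof_fmt(123456789)  # '117.7MB'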
-
-    # Now we have our raw stats in a readable format - we need to format it for display
-    # We need a floating point sum, so the built-in sum doesn't cut it
-    display_stat_collection = dict()
-    for header, values in raw_stat_collection.items():
-      display_stat = None
-      if 'cpu' in header:
-        display_stat = sizeof_fmt(math.fsum(values) / len(values))
-      elif header == 'memory usage':
-        display_stat = sizeof_fmt(math.fsum(values) / len(values))
-      elif 'net' in header:
-        receive, send = zip(*values) # unzip
-        display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
-      else: # if 'dsk' or 'io' in header:
-        read, write = zip(*values) # unzip
-        display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
-      display_stat_collection[header] = display_stat
-    return display_stat_collection
-  ###########################################################################################
-  # End __calculate_average_stats
-  #########################################################################################
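The summary returned by __calculate_average_stats is keyed by the same dstat group headers; an illustrative shape (values made up):

    display_stat_collection = {
        'total cpu usage': '3.1bytes',  # average non-idle CPU; sizeof_fmt is applied even to this percentage
        'memory usage': '1.7GB',
        'net/total': {'receive': '1.2GB', 'send': '340.5MB'},
        'dsk/total': {'read': '12.0MB', 'write': '104.3MB'}
    }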
-
-
-  ##########################################################################################
-  # Constructor
-  ##########################################################################################
-  def __init__(self, name, directory, benchmarker, runTests, args):
-    self.name = name
-    self.directory = directory
-    self.benchmarker = benchmarker
-    self.runTests = runTests
-    self.fwroot = benchmarker.fwroot
-    self.approach = ""
-    self.classification = ""
-    self.database = ""
-    self.framework = ""
-    self.language = ""
-    self.orm = ""
-    self.platform = ""
-    self.webserver = ""
-    self.os = ""
-    self.database_os = ""
-    self.display_name = ""
-    self.notes = ""
-    self.versus = ""
-    self.docker_files = None
-
-    # setup logging
-    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-
-    self.install_root="%s/%s" % (self.fwroot, "installs")
-
-    # Used in setup.sh scripts for consistency with
-    # the bash environment variables
-    self.troot = self.directory
-    self.iroot = self.install_root
-
-    self.__dict__.update(args)
-  ############################################################
-  # End __init__
-  ############################################################
-############################################################
-# End FrameworkTest
-############################################################
-
-
-# Static methods
-
-def test_order(type_name):
-  """
-  This sort key orders test types by the length of their name.
-  There were so many problems caused by 'plaintext' not being
-  run last that we needed to ensure it is run last for every
-  framework.
-  """
-  return len(type_name)
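Used as a sort key, it pushes longer-named types (notably 'plaintext') to the end:

    sorted(['plaintext', 'json', 'db', 'fortune'], key=test_order)
    # ['db', 'json', 'fortune', 'plaintext']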
-
-
-def validate_urls(test_name, test_keys):
-  """
-  Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
-  empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
-  the suggested url specifications, although those suggestions are presented if a url fails validation here.
-  """
-  example_urls = {
-    "json_url":         "/json",
-    "db_url":           "/mysql/db",
-    "query_url":        "/mysql/queries?queries=  or  /mysql/queries/",
-    "fortune_url":      "/mysql/fortunes",
-    "update_url":       "/mysql/updates?queries=  or  /mysql/updates/",
-    "plaintext_url":    "/plaintext",
-    "cached_query_url": "/mysql/cached_queries?queries=  or /mysql/cached_queries"
-  }
-
-  for test_url in ["json_url","db_url","query_url","fortune_url","update_url","plaintext_url","cached_query_url"]:
-    key_value = test_keys.get(test_url, None)
-    if key_value is not None and not key_value.startswith('/'):
-      errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
-        Example `%s` url: \"%s\"
-      """ % (test_url, test_name, key_value, test_url, example_urls[test_url])
-      raise Exception(errmsg)
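For example, a url that is missing its leading slash fails here (test name hypothetical):

    validate_urls('example-test', {'json_url': 'json'})
    # raises Exception: `json_url` field in test "example-test" does not appear to be a valid url: "json"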
-
-
-def validate_test(test_name, test_keys, directory):
-  """
-  Validate benchmark config values for this test based on a schema
-  """
-  # Ensure that each FrameworkTest has a framework property, inheriting from top-level if not
-  if not test_keys['framework']:
-    test_keys['framework'] = config['framework']
-
-  recommended_lang = directory.split('/')[-2]
-  windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
-  schema = {
-    'language': {
-      'help': ('language', 'The language of the framework used, suggestion: %s' % recommended_lang)
-    },
-    'webserver': {
-      'help': ('webserver', 'Name of the webserver also referred to as the "front-end server"')
-    },
-    'classification': {
-      'allowed': [
-        ('Fullstack', '...'),
-        ('Micro', '...'),
-        ('Platform', '...')
-      ]
-    },
-    'database': {
-      'allowed': [
-        ('MySQL', 'One of the most popular databases around the web and in TFB'),
-        ('Postgres', 'An advanced SQL database with a larger feature set than MySQL'),
-        ('MongoDB', 'A popular document-store database'),
-        ('Cassandra', 'A highly performant and scalable NoSQL database'),
-        ('Elasticsearch', 'A distributed RESTful search engine that is used as a database for TFB tests'),
-        ('Redis', 'An open-sourced, BSD licensed, advanced key-value cache and store'),
-        ('SQLite', 'A network-less database, still supported for backwards compatibility'),
-        ('SQLServer', 'Microsoft\'s SQL implementation'),
-        ('None', 'No database was used for these tests, as is the case with Json Serialization and Plaintext')
-      ]
-    },
-    'approach': {
-      'allowed': [
-        ('Realistic', '...'),
-        ('Stripped', '...')
-      ]
-    },
-    'orm': {
-      'allowed': [
-        ('Full', 'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'),
-        ('Micro', 'Has basic database driver capabilities such as establishing a connection and sending queries.'),
-        ('Raw', 'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.')
-      ]
-    },
-    'platform': {
-      'help': ('platform', 'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...')
-    },
-    'framework': {
-      # Guaranteed to be here and correct at this point
-      # key is left here to produce the set of required keys
-    },
-    'os': {
-      'allowed': [
-        ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'),
-        ('Windows', 'TFB is not fully compatible on Windows; contribute towards our work on compatibility: %s' % windows_url)
-      ]
-    },
-    'database_os': {
-      'allowed': [
-        ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'),
-        ('Windows', 'TFB is not fully compatible on Windows; contribute towards our work on compatibility: %s' % windows_url)
-      ]
-    }
-  }
-
-  # Confirm required keys are present
-  required_keys = schema.keys()
-  missing = list(set(required_keys) - set(test_keys))
-
-  if len(missing) > 0:
-    missingstr = (", ").join(map(str, missing))
-    raise Exception("benchmark_config.json for test %s is invalid, please amend by adding the following required keys: [%s]"
-      % (test_name, missingstr))
-
-  # Check the (all optional) test urls
-  validate_urls(test_name, test_keys)
-
-  # Check values of keys against schema
-  for key in required_keys:
-    val = test_keys.get(key, "").lower()
-    has_predefined_acceptables = 'allowed' in schema[key]
-
-    if has_predefined_acceptables:
-      allowed = schema[key].get('allowed', [])
-      acceptable_values, descriptors = zip(*allowed)
-      acceptable_values = [a.lower() for a in acceptable_values]
-
-      if val not in acceptable_values:
-        msg = ("Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
-          % (key, test_name, test_keys['framework']))
-        helpinfo = ('\n').join(["  `%s` -- %s" % (v, desc) for (v, desc) in zip(acceptable_values, descriptors)])
-        fullerr = msg + helpinfo + "\n"
-        raise Exception(fullerr)
-
-    elif not has_predefined_acceptables and val == "":
-      msg = ("Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n"
-        % (key, test_name, test_keys['framework']))
-      helpinfo = "  %s -- %s" % schema[key]['help']
-      fullerr = msg + helpinfo + '\n'
-      raise Exception(fullerr)
-
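Putting the schema together, a test entry needs at least the keys below; a sketch using values from the allowed lists above (framework name and directory are hypothetical):

    test_keys = {
        'language': 'Python', 'webserver': 'nginx', 'classification': 'Micro',
        'database': 'Postgres', 'approach': 'Realistic', 'orm': 'Raw',
        'platform': 'CPython', 'framework': 'example', 'os': 'Linux',
        'database_os': 'Linux'
    }
    validate_test('default', test_keys, 'frameworks/Python/example')  # passes, no exception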
-def parse_config(config, directory, benchmarker):
-  """
-  Parses a config file into a list of FrameworkTest objects
-  """
-  tests = []
-
-  # The config object can specify multiple tests
-  # Loop over them and parse each into a FrameworkTest
-  for test in config['tests']:
-
-    tests_to_run = [name for (name,keys) in test.iteritems()]
-    if "default" not in tests_to_run:
-      logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework'])
-
-    # Check that each test configuration is acceptable
-    # Throw exceptions if a field is missing, or how to improve the field
-    for test_name, test_keys in test.iteritems():
-      # Validates the benchmark_config entry
-      validate_test(test_name, test_keys, directory)
-
-      # Map test type to a parsed FrameworkTestType object
-      runTests = dict()
-      for type_name, type_obj in benchmarker.types.iteritems():
-        try:
-          # Makes a FrameworkTestType object using some of the keys in config
-          # e.g. JsonTestType uses "json_url"
-          runTests[type_name] = type_obj.copy().parse(test_keys)
-        except AttributeError as ae:
-          # This is quite common - most tests don't support all types
-          # Quietly log it and move on (debug logging is on in Travis and this causes
-          # ~1500 lines of debug output, so it is ignored for now)
-          # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
-          pass
-
-      # We need to sort by test_type to run
-      sortedTestKeys = sorted(runTests.keys(), key=test_order)
-      sortedRunTests = OrderedDict()
-      for sortedTestKey in sortedTestKeys:
-        sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
-
-      # Prefix all test names with framework except 'default' test
-      # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
-      if test_name == 'default':
-        test_name = config['framework']
-      else:
-        test_name = "%s-%s" % (config['framework'], test_name)
-
-      # By passing the entire set of keys, each FrameworkTest will have a member for each key
-      tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))
-
-  return tests
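For context, parse_config walks a benchmark_config.json that looks roughly like this (abridged; url keys as in validate_urls above, second test name hypothetical):

    {
      "framework": "example",
      "tests": [{
        "default": {
          "json_url": "/json",
          "db_url": "/mysql/db",
          "language": "Python",
          ...
        },
        "mysql-raw": { ... }
      }]
    }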
+                                base_url)]
+                    logging.warning(
+                        "Verifying test %s for %s caused an exception: %s",
+                        test_type, self.name, e)
+                    traceback.print_exc()
+
+                test.failed = any(
+                    result == 'fail' for (result, reason, url) in results)
+                test.warned = any(
+                    result == 'warn' for (result, reason, url) in results)
+                test.passed = all(
+                    result == 'pass' for (result, reason, url) in results)
+
+                def output_result(result, reason, url):
+                    specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
+                    color = Fore.GREEN
+                    if result.upper() == "WARN":
+                        color = Fore.YELLOW
+                    elif result.upper() == "FAIL":
+                        color = Fore.RED
+
+                    verification.write((
+                        "   " + color + "%s" + Style.RESET_ALL + " for %s\n") %
+                                       (result.upper(), url))
+                    print("   {!s}{!s}{!s} for {!s}\n".format(
+                        color, result.upper(), Style.RESET_ALL, url))
+                    if reason is not None and len(reason) != 0:
+                        for line in reason.splitlines():
+                            verification.write("     " + line + '\n')
+                            print("     " + line)
+                        if not test.passed:
+                            verification.write(
+                                "     See %s\n" % specific_rules_url)
+                            print("     See {!s}\n".format(specific_rules_url))
+
+                for (r1, r2, url) in results:
+                    output_result(r1, r2, url)
+
+                if test.failed:
+                    self.results.report_verify_results(self, test_type, 'fail')
+                elif test.warned:
+                    self.results.report_verify_results(self, test_type, 'warn')
+                elif test.passed:
+                    self.results.report_verify_results(self, test_type, 'pass')
+                else:
+                    raise Exception(
+                        "Unknown error - test did not pass, warn, or fail")
+
+                verification.flush()
+
+        result = True
+        for test_type in self.runTests:
+            verify_type(test_type)
+            if self.runTests[test_type].failed:
+                result = False
+
+        return result
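The verification block above treats each entry of results as a (result, reason, url) tuple, where result is 'pass', 'warn', or 'fail'; an illustrative value (host made up):

    results = [
        ('pass', None, 'http://tfb-server:8080/json'),
        ('warn', 'Required response header missing: Server', 'http://tfb-server:8080/json')
    ]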

+ 7 - 16
toolset/benchmark/test_types/cached_query_type.py

@@ -1,21 +1,17 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import (
-    verify_headers,
-    verify_randomnumber_list,
-    verify_query_cases
-)
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import verify_query_cases
 
 
 
 
 class CachedQueryTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.cached_query_url = ""
         kwargs = {
             'name': 'cached_query',
             'accept_header': self.accept('json'),
             'requires_db': True,
             'args': ['cached_query_url']
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
         return self.cached_query_url
@@ -30,13 +26,8 @@ class CachedQueryTestType(FrameworkTestType):
         '''

         url = base_url + self.cached_query_url
-        cases = [
-            ('2',   'fail'),
-            ('0',   'fail'),
-            ('foo', 'fail'),
-            ('501', 'warn'),
-            ('',    'fail')
-        ]
+        cases = [('2', 'fail'), ('0', 'fail'), ('foo', 'fail'),
+                 ('501', 'warn'), ('', 'fail')]
 
 
         problems = verify_query_cases(self, cases, url)
 
 

+ 16 - 13
toolset/benchmark/test_types/db_type.py

@@ -1,25 +1,24 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_randomnumber_object
-
-import json
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_randomnumber_object
 
 
 
 
 class DBTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.db_url = ""
         kwargs = {
             'name': 'db',
             'accept_header': self.accept('json'),
             'requires_db': True,
             'args': ['db_url']
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
         return self.db_url

     def verify(self, base_url):
-        '''Ensures body is valid JSON with a key 'id' and a key 
+        '''
+        Ensures body is valid JSON with a key 'id' and a key 
         'randomNumber', both of which must map to integers
         '''

@@ -29,20 +28,24 @@ class DBTestType(FrameworkTestType):
         response, problems = basic_body_verification(body, url)

         if len(problems) > 0:
-            return problems 
+            return problems
 
 
         # We are allowing the single-object array
         # e.g. [{'id':5, 'randomNumber':10}] for now,
         # but will likely make this fail at some point
         if type(response) == list:
             response = response[0]
-            problems.append(
-                ('warn', 'Response is a JSON array. Expected JSON object (e.g. [] vs {})', url))
+            problems.append((
+                'warn',
+                'Response is a JSON array. Expected JSON object (e.g. [] vs {})',
+                url))
 
 
             # Make sure there was a JSON object inside the array
             if type(response) != dict:
-                problems.append(
-                    ('fail', 'Response is not a JSON object or an array of JSON objects', url))
+                problems.append((
+                    'fail',
+                    'Response is not a JSON object or an array of JSON objects',
+                    url))
                 return problems

         # Verify response content

+ 18 - 19
toolset/benchmark/test_types/fortune_type.py

@@ -1,24 +1,25 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.fortune_html_parser import FortuneHTMLParser
-from benchmark.test_types.verifications import basic_body_verification, verify_headers
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.fortune_html_parser import FortuneHTMLParser
+from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers
 
 
 
 
 class FortuneTestType(FrameworkTestType):
 class FortuneTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.fortune_url = ""
         kwargs = {
         kwargs = {
             'name': 'fortune',
             'name': 'fortune',
             'accept_header': self.accept('html'),
             'accept_header': self.accept('html'),
             'requires_db': True,
             'requires_db': True,
             'args': ['fortune_url']
             'args': ['fortune_url']
         }
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
     def get_url(self):
         return self.fortune_url
         return self.fortune_url
 
 
     def verify(self, base_url):
     def verify(self, base_url):
-        '''Parses the given HTML string and asks the 
+        '''
+        Parses the given HTML string and asks the 
         FortuneHTMLParser whether the parsed string is a 
         FortuneHTMLParser whether the parsed string is a 
         valid fortune response
         valid fortune response
         '''
         '''
@@ -44,13 +45,14 @@ class FortuneTestType(FrameworkTestType):
                 return problems
                 return problems
         else:
         else:
             failures = []
             failures = []
-            failures.append(
-                ('fail', 'Invalid according to FortuneHTMLParser', url))
+            failures.append(('fail', 'Invalid according to FortuneHTMLParser',
+                             url))
             failures += self._parseDiffForFailure(diff, failures, url)
             failures += self._parseDiffForFailure(diff, failures, url)
             return failures
             return failures
 
 
     def _parseDiffForFailure(self, diff, failures, url):
     def _parseDiffForFailure(self, diff, failures, url):
-        '''Example diff:
+        '''
+        Example diff:
 
 
         --- Valid
         --- Valid
         +++ Response
         +++ Response
@@ -73,16 +75,13 @@ class FortuneTestType(FrameworkTestType):
                 elif line[0] == '-':
                 elif line[0] == '-':
                     current_pos.append(line[1:])
                     current_pos.append(line[1:])
                 elif line[0] == '@':
                 elif line[0] == '@':
-                    problems.append(('fail',
-                                     "`%s` should be `%s`" % (
-                                         ''.join(current_neg), ''.join(current_pos)),
-                                     url))
+                    problems.append(('fail', "`%s` should be `%s`" %
+                                     (''.join(current_neg),
+                                      ''.join(current_pos)), url))
             if len(current_pos) != 0:
             if len(current_pos) != 0:
-                problems.append(
-                    ('fail',
-                     "`%s` should be `%s`" % (
-                         ''.join(current_neg), ''.join(current_pos)),
-                     url))
+                problems.append(('fail', "`%s` should be `%s`" %
+                                 (''.join(current_neg),
+                                  ''.join(current_pos)), url))
         except:
         except:
             # If there were errors reading the diff, then no diff information
             # If there were errors reading the diff, then no diff information
             pass
             pass

+ 35 - 22
toolset/benchmark/test_types/framework_test_type.py

@@ -1,9 +1,6 @@
 import copy
 import copy
 import sys
 import sys
-import os
 import json
 import json
-import subprocess
-from subprocess import PIPE
 import requests
 import requests
 import MySQLdb
 import MySQLdb
 import psycopg2
 import psycopg2
@@ -14,11 +11,8 @@ import pymongo
 import logging
 import logging
 logging.getLogger('urllib3').setLevel(logging.CRITICAL)
 logging.getLogger('urllib3').setLevel(logging.CRITICAL)
 
 
-from pprint import pprint
-
 
 
 class FrameworkTestType:
 class FrameworkTestType:
-
     '''
     '''
     Interface between a test type (json, query, plaintext, etc) and 
     Interface between a test type (json, query, plaintext, etc) and 
     the rest of TFB. A test type defines a number of keys it expects
     the rest of TFB. A test type defines a number of keys it expects
@@ -29,7 +23,13 @@ class FrameworkTestType:
     exist a member `X.spam = 'foobar'`. 
     exist a member `X.spam = 'foobar'`. 
     '''
     '''
 
 
-    def __init__(self, name, requires_db=False, accept_header=None, args=[]):
+    def __init__(self,
+                 config,
+                 name,
+                 requires_db=False,
+                 accept_header=None,
+                 args=[]):
+        self.config = config
         self.name = name
         self.name = name
         self.requires_db = requires_db
         self.requires_db = requires_db
         self.args = args
         self.args = args
@@ -47,9 +47,12 @@ class FrameworkTestType:
 
 
     def accept(self, content_type):
     def accept(self, content_type):
         return {
         return {
-            'json': 'application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7',
-            'html': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-            'plaintext': 'text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
+            'json':
+            'application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7',
+            'html':
+            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+            'plaintext':
+            'text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
         }[content_type]
         }[content_type]
 
 
     def setup_out(self, out):
     def setup_out(self, out):
@@ -76,7 +79,8 @@ class FrameworkTestType:
             return self
             return self
         else:  # This is quite common - most tests don't support all types
         else:  # This is quite common - most tests don't support all types
             raise AttributeError(
             raise AttributeError(
-                "A %s requires the benchmark_config.json to contain %s" % (self.name, self.args))
+                "A %s requires the benchmark_config.json to contain %s" %
+                (self.name, self.args))
 
 
     def request_headers_and_body(self, url):
     def request_headers_and_body(self, url):
         '''
         '''
@@ -94,7 +98,9 @@ class FrameworkTestType:
         self.out.write(str(headers))
         self.out.write(str(headers))
         self.out.write(body)
         self.out.write(body)
         b = 40
         b = 40
-        print("  Response (trimmed to {:d} bytes): \"{!s}\"".format(b, body.strip()[:b]))
+        print("  Response (trimmed to {:d} bytes): \"{!s}\"".format(
+            b,
+            body.strip()[:b]))
         return headers, body
         return headers, body
 
 
     def verify(self, base_url):
     def verify(self, base_url):
@@ -146,7 +152,9 @@ class FrameworkTestType:
 
 
         if database_name == "mysql":
         if database_name == "mysql":
             try:
             try:
-                db = MySQLdb.connect("TFB-database", "benchmarkdbuser", "benchmarkdbpass", "hello_world")
+                db = MySQLdb.connect(self.config.database_host,
+                                     "benchmarkdbuser", "benchmarkdbpass",
+                                     "hello_world")
                 cursor = db.cursor()
                 cursor = db.cursor()
                 cursor.execute("SELECT * FROM World")
                 cursor.execute("SELECT * FROM World")
                 results = cursor.fetchall()
                 results = cursor.fetchall()
@@ -157,11 +165,12 @@ class FrameworkTestType:
                 print(e)
                 print(e)
         elif database_name == "postgres":
         elif database_name == "postgres":
             try:
             try:
-                db = psycopg2.connect(host="TFB-database",
-                                      port="5432",
-                                      user="benchmarkdbuser",
-                                      password="benchmarkdbpass",
-                                      database="hello_world")
+                db = psycopg2.connect(
+                    host=self.config.database_host,
+                    port="5432",
+                    user="benchmarkdbuser",
+                    password="benchmarkdbpass",
+                    database="hello_world")
                 cursor = db.cursor()
                 cursor = db.cursor()
                 cursor.execute("SELECT * FROM \"World\"")
                 cursor.execute("SELECT * FROM \"World\"")
                 results = cursor.fetchall()
                 results = cursor.fetchall()
@@ -177,20 +186,24 @@ class FrameworkTestType:
         elif database_name == "mongodb":
         elif database_name == "mongodb":
             try:
             try:
                 worlds_json = {}
                 worlds_json = {}
-                connection = pymongo.MongoClient(host="TFB-database")
+                connection = pymongo.MongoClient(
+                    host=self.config.database_host)
                 db = connection.hello_world
                 db = connection.hello_world
                 for world in db.world.find():
                 for world in db.world.find():
                     if "randomNumber" in world:
                     if "randomNumber" in world:
                         if "id" in world:
                         if "id" in world:
-                            worlds_json[str(int(world["id"]))] = int(world["randomNumber"])
+                            worlds_json[str(int(world["id"]))] = int(
+                                world["randomNumber"])
                         elif "_id" in world:
                         elif "_id" in world:
-                            worlds_json[str(int(world["_id"]))] = int(world["randomNumber"])
+                            worlds_json[str(int(world["_id"]))] = int(
+                                world["randomNumber"])
                 results_json.append(worlds_json)
                 results_json.append(worlds_json)
                 connection.close()
                 connection.close()
             except Exception as e:
             except Exception as e:
                 print("ERROR: Unable to load current MongoDB World table.")
                 print("ERROR: Unable to load current MongoDB World table.")
                 print(e)
                 print(e)
         else:
         else:
-            raise ValueError("Database: {!s} does not exist".format(database_name))
+            raise ValueError(
+                "Database: {!s} does not exist".format(database_name))
 
 
         return results_json
         return results_json
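get_current_world_table supplies the before/after snapshots that the update verification compares; a sketch of the intended flow, mirroring how verify_query_cases uses it further down:

    world_db_before = self.get_current_world_table()
    # ... the /updates requests are issued against the test url ...
    world_db_after = self.get_current_world_table()
    problems += verify_updates(world_db_before, world_db_after, MAX, case_url)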

+ 7 - 12
toolset/benchmark/test_types/json_type.py

@@ -1,29 +1,24 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import (
-    basic_body_verification,
-    verify_headers,
-    verify_helloworld_object
-)
-
-import json
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers, verify_helloworld_object
 
 
 
 
 class JsonTestType(FrameworkTestType):
 class JsonTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.json_url = ""
         kwargs = {
         kwargs = {
             'name': 'json',
             'name': 'json',
             'accept_header': self.accept('json'),
             'accept_header': self.accept('json'),
             'requires_db': False,
             'requires_db': False,
             'args': ['json_url']
             'args': ['json_url']
         }
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
     def get_url(self):
         return self.json_url
         return self.json_url
 
 
     def verify(self, base_url):
     def verify(self, base_url):
-        '''Validates the response is a JSON object of 
+        '''
+        Validates the response is a JSON object of 
         { 'message' : 'hello, world!' }. Case insensitive and 
         { 'message' : 'hello, world!' }. Case insensitive and 
         quoting style is ignored
         quoting style is ignored
         '''
         '''

+ 9 - 9
toolset/benchmark/test_types/plaintext_type.py

@@ -1,17 +1,17 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import basic_body_verification, verify_headers
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import basic_body_verification, verify_headers
 
 
 
 
 class PlaintextTestType(FrameworkTestType):
 class PlaintextTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.plaintext_url = ""
         kwargs = {
         kwargs = {
             'name': 'plaintext',
             'name': 'plaintext',
             'requires_db': False,
             'requires_db': False,
             'accept_header': self.accept('plaintext'),
             'accept_header': self.accept('plaintext'),
             'args': ['plaintext_url']
             'args': ['plaintext_url']
         }
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def verify(self, base_url):
     def verify(self, base_url):
         url = base_url + self.plaintext_url
         url = base_url + self.plaintext_url
@@ -29,15 +29,15 @@ class PlaintextTestType(FrameworkTestType):
         extra_bytes = len(body) - len(expected)
         extra_bytes = len(body) - len(expected)
 
 
         if expected not in body:
         if expected not in body:
-            return [('fail', "Could not find 'Hello, World!' in response.", url)]
+            return [('fail', "Could not find 'Hello, World!' in response.",
+                     url)]
 
 
         if extra_bytes > 0:
         if extra_bytes > 0:
             problems.append(
             problems.append(
                 ('warn',
                 ('warn',
                  ("Server is returning %s more bytes than are required. "
                  ("Server is returning %s more bytes than are required. "
-                  "This may negatively affect benchmark performance."
-                  % (extra_bytes)),
-                 url))
+                  "This may negatively affect benchmark performance." %
+                  (extra_bytes)), url))
 
 
         problems += verify_headers(headers, url, should_be='plaintext')
         problems += verify_headers(headers, url, should_be='plaintext')
 
 

+ 7 - 18
toolset/benchmark/test_types/query_type.py

@@ -1,23 +1,17 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import (
-    verify_headers,
-    verify_randomnumber_list,
-    verify_query_cases
-)
-
-import json
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import verify_query_cases
 
 
 
 
 class QueryTestType(FrameworkTestType):
 class QueryTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.query_url = ""
         kwargs = {
         kwargs = {
             'name': 'query',
             'name': 'query',
             'accept_header': self.accept('json'),
             'accept_header': self.accept('json'),
             'requires_db': True,
             'requires_db': True,
             'args': ['query_url']
             'args': ['query_url']
         }
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
     def get_url(self):
         return self.query_url
         return self.query_url
@@ -32,13 +26,8 @@ class QueryTestType(FrameworkTestType):
         '''
         '''
 
 
         url = base_url + self.query_url
         url = base_url + self.query_url
-        cases = [
-            ('2',   'fail'),
-            ('0',   'fail'),
-            ('foo', 'fail'),
-            ('501', 'warn'),
-            ('',    'fail')
-        ]
+        cases = [('2', 'fail'), ('0', 'fail'), ('foo', 'fail'),
+                 ('501', 'warn'), ('', 'fail')]
 
 
         problems = verify_query_cases(self, cases, url)
         problems = verify_query_cases(self, cases, url)
 
 

+ 9 - 13
toolset/benchmark/test_types/update_type.py

@@ -1,23 +1,24 @@
-from benchmark.test_types.framework_test_type import FrameworkTestType
-from benchmark.test_types.verifications import verify_query_cases
+from toolset.benchmark.test_types.framework_test_type import FrameworkTestType
+from toolset.benchmark.test_types.verifications import verify_query_cases
 
 
 
 
 class UpdateTestType(FrameworkTestType):
 class UpdateTestType(FrameworkTestType):
-
-    def __init__(self):
+    def __init__(self, config):
+        self.update_url = ""
         kwargs = {
         kwargs = {
             'name': 'update',
             'name': 'update',
             'accept_header': self.accept('json'),
             'accept_header': self.accept('json'),
             'requires_db': True,
             'requires_db': True,
             'args': ['update_url', 'database']
             'args': ['update_url', 'database']
         }
         }
-        FrameworkTestType.__init__(self, **kwargs)
+        FrameworkTestType.__init__(self, config, **kwargs)
 
 
     def get_url(self):
     def get_url(self):
         return self.update_url
         return self.update_url
 
 
     def verify(self, base_url):
     def verify(self, base_url):
-        '''Validates the response is a JSON array of 
+        '''
+        Validates the response is a JSON array of 
         the proper length, each JSON Object in the array 
         the proper length, each JSON Object in the array 
         has keys 'id' and 'randomNumber', and these keys 
         has keys 'id' and 'randomNumber', and these keys 
         map to integers. Case insensitive and 
         map to integers. Case insensitive and 
@@ -25,13 +26,8 @@ class UpdateTestType(FrameworkTestType):
         '''
         '''
 
 
         url = base_url + self.update_url
         url = base_url + self.update_url
-        cases = [
-            ('2',   'fail'),
-            ('0',   'fail'),
-            ('foo', 'fail'),
-            ('501', 'warn'),
-            ('',    'fail')
-        ]
+        cases = [('2', 'fail'), ('0', 'fail'), ('foo', 'fail'),
+                 ('501', 'warn'), ('', 'fail')]
         problems = verify_query_cases(self, cases, url, True)
         problems = verify_query_cases(self, cases, url, True)
 
 
         if len(problems) == 0:
         if len(problems) == 0:

+ 90 - 89
toolset/benchmark/test_types/verifications.py

@@ -1,6 +1,5 @@
 import json
 import json
 import re
 import re
-import math
 
 
 
 
 def basic_body_verification(body, url, is_json_check=True):
 def basic_body_verification(body, url, is_json_check=True):
@@ -48,24 +47,23 @@ def verify_headers(headers, url, should_be='json'):
 
 
     problems = []
     problems = []
 
 
-    for v in (v for v in ('Server', 'Date', 'Content-Type') if v.lower() not in headers):
-        problems.append(
-            ('warn', 'Required response header missing: %s' % v, url))
+    for v in (v for v in ('Server', 'Date', 'Content-Type')
+              if v.lower() not in headers):
+        problems.append(('warn', 'Required response header missing: %s' % v,
+                         url))
 
 
-    if all(v.lower() not in headers for v in ('Content-Length', 'Transfer-Encoding')):
-        problems.append(
-            ('warn',
-             'Required response size header missing, please include either "Content-Length" or "Transfer-Encoding"',
-             url))
+    if all(v.lower() not in headers
+           for v in ('Content-Length', 'Transfer-Encoding')):
+        problems.append((
+            'warn',
+            'Required response size header missing, please include either "Content-Length" or "Transfer-Encoding"',
+            url))
 
 
     content_type = headers.get('Content-Type', None)
     content_type = headers.get('Content-Type', None)
 
 
     if content_type is None:
     if content_type is None:
-        problems.append(
-            ('warn',
-             'No content encoding found, expected \"%s\"' % (
-                 expected_type),
-             url))
+        problems.append(('warn', 'No content encoding found, expected \"%s\"' %
+                         (expected_type), url))
     else:
     else:
         # Split out "charset=utf-8" if it's included
         # Split out "charset=utf-8" if it's included
         content_type_list = re.split('; *', content_type.lower())
         content_type_list = re.split('; *', content_type.lower())
@@ -73,31 +71,25 @@ def verify_headers(headers, url, should_be='json'):
         # "text/html" requires charset to be set. The others do not
         # "text/html" requires charset to be set. The others do not
         if expected_type == types['html']:
         if expected_type == types['html']:
             if expected_type not in content_type_list:
             if expected_type not in content_type_list:
-                problems.append(
-                    ('warn',
-                     'Unexpected content encoding, found \"%s\", expected \"%s\".' % (
-                         content_type, expected_type + '; ' + charset),
-                     url))
+                problems.append((
+                    'warn',
+                    'Unexpected content encoding, found \"%s\", expected \"%s\".'
+                    % (content_type, expected_type + '; ' + charset), url))
             elif charset not in content_type_list:
             elif charset not in content_type_list:
-                problems.append(
-                    ('warn',
-                     ('The \"%s\" content type requires \"charset=utf-8\" to be specified.'
-                      % expected_type),
-                     url))
+                problems.append(('warn', (
+                    'The \"%s\" content type requires \"charset=utf-8\" to be specified.'
+                    % expected_type), url))
         else:
         else:
             if expected_type not in content_type_list:
             if expected_type not in content_type_list:
-                problems.append(
-                    ('warn',
-                     'Unexpected content encoding, found \"%s\", expected \"%s\"' % (
-                         content_type, expected_type),
-                     url))
+                problems.append((
+                    'warn',
+                    'Unexpected content encoding, found \"%s\", expected \"%s\"'
+                    % (content_type, expected_type), url))
             elif charset in content_type_list:
             elif charset in content_type_list:
-                problems.append(
-                    ('warn',
-                     ("Content encoding found in \"%s\" where \"%s\" is acceptable.\n"
-                      "Additional response bytes may negatively affect benchmark performance."
-                      % (content_type, expected_type)),
-                     url))
+                problems.append(('warn', (
+                    "Content encoding found in \"%s\" where \"%s\" is acceptable.\n"
+                    "Additional response bytes may negatively affect benchmark performance."
+                    % (content_type, expected_type)), url))
     return problems
     return problems
 
 
 
 
@@ -110,8 +102,7 @@ def verify_helloworld_object(json_object, url):
     problems = []
     problems = []
 
 
     # Make everything case insensitive
     # Make everything case insensitive
-    json_object = {k.lower(): v.lower()
-                   for k, v in json_object.iteritems()}
+    json_object = {k.lower(): v.lower() for k, v in json_object.iteritems()}
 
 
     if 'message' not in json_object:
     if 'message' not in json_object:
         return [('fail', "Missing required key 'message'", url)]
         return [('fail', "Missing required key 'message'", url)]
@@ -121,14 +112,19 @@ def verify_helloworld_object(json_object, url):
             additional = (', ').join(
             additional = (', ').join(
                 [k for k in json_object.keys() if k != 'message'])
                 [k for k in json_object.keys() if k != 'message'])
             problems.append(
             problems.append(
-                ('warn', "Too many JSON key/value pairs, consider removing: %s" % additional, url))
+                ('warn', "Too many JSON key/value pairs, consider removing: %s"
+                 % additional, url))
         if json_len > 27:
         if json_len > 27:
             problems.append(
             problems.append(
-                'warn', "%s additional response byte(s) found. Consider removing unnecessary whitespace." % (json_len - 26))
+                ('warn',
+                 "%s additional response byte(s) found. Consider removing unnecessary whitespace."
+                 % (json_len - 26), url))
         message = json_object['message']
         message = json_object['message']
 
 
         if message != 'hello, world!':
         if message != 'hello, world!':
-            return [('fail', "Expected message of 'hello, world!', got '%s'" % message, url)]
+            return [('fail',
+                     "Expected message of 'hello, world!', got '%s'" % message,
+                     url)]
         return problems
         return problems
 
 
 
 
@@ -148,7 +144,8 @@ def verify_randomnumber_object(db_object, url, max_infraction='fail'):
         got = str(db_object)[:20]
         got = str(db_object)[:20]
         if len(str(db_object)) > 20:
         if len(str(db_object)) > 20:
             got = str(db_object)[:17] + '...'
             got = str(db_object)[:17] + '...'
-        return [(max_infraction, "Expected a JSON object, got '%s' instead" % got, url)]
+        return [(max_infraction,
+                 "Expected a JSON object, got '%s' instead" % got, url)]
 
 
     # Make keys case insensitive
     # Make keys case insensitive
     db_object = {k.lower(): v for k, v in db_object.iteritems()}
     db_object = {k.lower(): v for k, v in db_object.iteritems()}
@@ -156,15 +153,14 @@ def verify_randomnumber_object(db_object, url, max_infraction='fail'):
 
 
     for v in (v for v in required_keys if v not in db_object):
     for v in (v for v in required_keys if v not in db_object):
         problems.append(
         problems.append(
-            (max_infraction, 'Response object was missing required key: %s' % v, url))
+            (max_infraction,
+             'Response object was missing required key: %s' % v, url))
 
 
     if len(db_object) > len(required_keys):
     if len(db_object) > len(required_keys):
         extras = set(db_object.keys()) - required_keys
         extras = set(db_object.keys()) - required_keys
         problems.append(
         problems.append(
-            ('warn',
-             'An extra key(s) is being included with the db object: %s' % ', '.join(
-                 extras),
-             url))
+            ('warn', 'An extra key(s) is being included with the db object: %s'
+             % ', '.join(extras), url))
 
 
     # All required keys must be present
     # All required keys must be present
     if len(problems) > 0:
     if len(problems) > 0:
@@ -175,31 +171,37 @@ def verify_randomnumber_object(db_object, url, max_infraction='fail'):
         o_id = int(db_object['id'])
         o_id = int(db_object['id'])
 
 
         if o_id > 10000 or o_id < 1:
         if o_id > 10000 or o_id < 1:
-            problems.append(
-                ('warn',
-                 'Response key id should be between 1 and 10,000: ' +
-                 str(o_id),
-                 url))
+            problems.append((
+                'warn',
+                'Response key id should be between 1 and 10,000: ' + str(o_id),
+                url))
     except TypeError as e:
     except TypeError as e:
         problems.append(
         problems.append(
-            (max_infraction, "Response key 'id' does not map to an integer - %s" % e, url))
+            (max_infraction,
+             "Response key 'id' does not map to an integer - %s" % e, url))
 
 
     try:
     try:
         o_rn = int(db_object['randomnumber'])
         o_rn = int(db_object['randomnumber'])
 
 
         if o_rn > 10000:
         if o_rn > 10000:
-            problems.append(
-                ('warn',
-                 'Response key `randomNumber` is over 10,000. This may negatively affect performance by sending extra bytes',
-                 url))
+            problems.append((
+                'warn',
+                'Response key `randomNumber` is over 10,000. This may negatively affect performance by sending extra bytes',
+                url))
     except TypeError as e:
     except TypeError as e:
         problems.append(
         problems.append(
-            (max_infraction, "Response key 'randomnumber' does not map to an integer - %s" % e, url))
+            (max_infraction,
+             "Response key 'randomnumber' does not map to an integer - %s" % e,
+             url))
 
 
     return problems
     return problems
 
 
 
 
-def verify_randomnumber_list(expected_len, headers, body, url, max_infraction='fail'):
+def verify_randomnumber_list(expected_len,
+                             headers,
+                             body,
+                             url,
+                             max_infraction='fail'):
     '''
     '''
     Validates that the object is a list containing a number of
     randomnumber object. Should closely resemble:
@@ -215,21 +217,20 @@ def verify_randomnumber_list(expected_len, headers, body, url, max_infraction='f
     # rather than a list containing one element. We allow this with a warn,
     # then verify the supplied object
     if type(response) is not list:
-        problems.append(
-            ('warn', 'Top-level JSON is an object, not an array', url))
+        problems.append(('warn', 'Top-level JSON is an object, not an array',
+                         url))
         problems += verify_randomnumber_object(response, url, max_infraction)
         return problems
 
     if any(type(item) is not dict for item in response):
         problems.append(
-            (max_infraction, 'Not all items in the JSON array were JSON objects', url))
+            (max_infraction,
+             'Not all items in the JSON array were JSON objects', url))
 
     if len(response) != expected_len:
-        problems.append(
-            (max_infraction,
-             "JSON array length of %s != expected length of %s" % (
-                 len(response), expected_len),
-             url))
+        problems.append((max_infraction,
+                         "JSON array length of %s != expected length of %s" %
+                         (len(response), expected_len), url))
 
     # Verify individual objects, arbitrarily stop after 5 bad ones are found
     # i.e. to not look at all 500
@@ -249,11 +250,12 @@ def verify_randomnumber_list(expected_len, headers, body, url, max_infraction='f
 
     return problems
 
+
 def verify_updates(old_worlds, new_worlds, updates_expected, url):
     '''
     Validates that the /updates requests actually updated values in the database and didn't
     just return a JSON list of the correct number of World items.
-sz
+
     old_worlds  a JSON object containing the state of the Worlds table BEFORE the /updates requests
     new_worlds  a JSON object containing the state of the Worlds table AFTER the /updates requests
     If no items were updated, this validation test returns a "fail."
@@ -270,28 +272,30 @@ sz
         for i in range(1, 10001):
             try:
                 entry_id = str(i)
-                if entry_id in old_worlds[n] and entry_id  in new_worlds[n]:
+                if entry_id in old_worlds[n] and entry_id in new_worlds[n]:
                     if old_worlds[n][entry_id] != new_worlds[n][entry_id]:
                         successful_updates += 1
             except Exception as e:
-                print e
+                print(e)
         n += 1
 
     if successful_updates == 0:
-        problems.append(
-            ("fail", "No items were updated in the database.", url))
+        problems.append(("fail", "No items were updated in the database.",
+                         url))
     elif successful_updates <= (updates_expected * 0.90):
-        problems.append(
-            ("fail", "Only %s items were updated in the database out of roughly %s expected." % (successful_updates, updates_expected), url))
+        problems.append((
+            "fail",
+            "Only %s items were updated in the database out of roughly %s expected."
+            % (successful_updates, updates_expected), url))
     elif successful_updates <= (updates_expected * 0.95):
-        problems.append(
-            ("warn",
-             "There may have been an error updating the database. Only %s items were updated in the database out of the roughly %s expected." % (
-                 successful_updates, updates_expected),
-             url))
+        problems.append((
+            "warn",
+            "There may have been an error updating the database. Only %s items were updated in the database out of the roughly %s expected."
+            % (successful_updates, updates_expected), url))
 
     return problems
 
+
 def verify_query_cases(self, cases, url, check_updates=False):
     '''
     The /updates and /queries tests accept a `queries` parameter
@@ -336,8 +340,8 @@ def verify_query_cases(self, cases, url, check_updates=False):
             else:
                 expected_len = queries
 
-            problems += verify_randomnumber_list(
-                expected_len, headers, body, case_url, max_infraction)
+            problems += verify_randomnumber_list(expected_len, headers, body,
+                                                 case_url, max_infraction)
             problems += verify_headers(headers, case_url)
 
             # Only check update changes if we are doing an Update verification and if we're testing
@@ -346,7 +350,8 @@ def verify_query_cases(self, cases, url, check_updates=False):
             # previously held
             if check_updates and queries >= MAX:
                 world_db_after = self.get_current_world_table()
-                problems += verify_updates(world_db_before, world_db_after, MAX, case_url)
+                problems += verify_updates(world_db_before, world_db_after,
+                                           MAX, case_url)
 
         except ValueError:
             warning = (
@@ -355,15 +360,11 @@ def verify_query_cases(self, cases, url, check_updates=False):
                 '(this will be a failure in future rounds, please fix)')
 
             if body is None:
-                problems.append(
-                    (max_infraction,
-                     warning % ('No response', q),
-                     case_url))
+                problems.append((max_infraction, warning % ('No response', q),
+                                 case_url))
             elif len(body) == 0:
-                problems.append(
-                    (max_infraction,
-                     warning % ('Empty response', q),
-                     case_url))
+                problems.append((max_infraction, warning % ('Empty response',
+                                                            q), case_url))
             else:
                 expected_len = 1
                 # Strictness will be upped in a future round, i.e. Frameworks currently do not have
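A note on the verifier contract exercised above: every check appends a (severity, message, url) tuple to `problems`, and `verify_updates` stays silent only when more than 95% of the expected rows actually changed. The helper below is a hypothetical restatement of just that threshold logic, not toolset code, written only to make the 90%/95% cut-offs explicit:

# Hypothetical helper restating the severity thresholds from the
# verify_updates hunk above; it is not part of the toolset.
def classify_update_count(successful_updates, updates_expected):
    if successful_updates == 0:
        return 'fail'  # nothing changed in the Worlds table at all
    if successful_updates <= updates_expected * 0.90:
        return 'fail'  # 90% or fewer of the expected rows changed
    if successful_updates <= updates_expected * 0.95:
        return 'warn'  # between 90% and 95%: suspicious but tolerated
    return 'pass'      # verify_updates appends no problem tuple in this case

# For example, with 500 expected updates:
assert classify_update_count(0, 500) == 'fail'
assert classify_update_count(445, 500) == 'fail'   # 445 <= 450
assert classify_update_count(460, 500) == 'warn'   # 450 < 460 <= 475
assert classify_update_count(480, 500) == 'pass'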

+ 0 - 241
toolset/benchmark/utils.py

@@ -1,241 +0,0 @@
-import ConfigParser
-import os
-import glob
-import json
-import socket
-import fnmatch
-
-from ast import literal_eval
-
-def find_docker_file(path, pattern):
-    for root, dirs, files in os.walk(path):
-        for name in files:
-            if fnmatch.fnmatch(name, pattern):
-                return os.path.join(root, name)
-
-def gather_docker_dependencies(docker_file):
-    '''
-    Gathers all the known docker dependencies for the given docker image.
-    '''
-    # Avoid setting up a circular import
-    from setup.linux import setup_util
-    deps = []
-
-    docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup", "linux", "docker")
-
-    if os.path.exists(docker_file):
-        with open(docker_file) as fp:
-            for line in fp.readlines():
-                tokens = line.strip().split(' ')
-                if tokens[0] == "FROM":
-                    # This is magic that our base image points to
-                    if tokens[1] != "ubuntu:16.04":
-                        depToken = tokens[1].strip().split(':')[0].strip().split('/')[1]
-                        deps.append(depToken)
-                        dep_docker_file = os.path.join(os.path.dirname(docker_file), depToken + ".dockerfile")
-                        if not os.path.exists(dep_docker_file):
-                            dep_docker_file = find_docker_file(docker_dir, depToken + ".dockerfile")
-                        deps.extend(gather_docker_dependencies(dep_docker_file))
-
-    return deps
-
-
-def gather_langauges():
-    '''
-    Gathers all the known languages in the suite via the folder names
-    beneath FWROOT.
-    '''
-    # Avoid setting up a circular import
-    from setup.linux import setup_util
-
-    lang_dir = os.path.join(setup_util.get_fwroot(), "frameworks")
-    langs = []
-    for dir in glob.glob(os.path.join(lang_dir, "*")):
-        langs.append(dir.replace(lang_dir,"")[1:])
-    return langs
-
-def gather_tests(include = [], exclude=[], benchmarker=None):
-    '''
-    Given test names as strings, returns a list of FrameworkTest objects.
-    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
-    variables for checking the test directory, the test database os, and
-    other useful items.
-
-    With no arguments, every test in this framework will be returned.
-    With include, only tests with this exact name will be returned.
-    With exclude, all tests but those excluded will be returned.
-
-    A benchmarker is needed to construct full FrameworkTest objects. If
-    one is not provided, a default Benchmarker will be created.
-    '''
-
-    # Avoid setting up a circular import
-    from benchmark import framework_test
-    from benchmark.benchmarker import Benchmarker
-    from setup.linux import setup_util
-
-    # Help callers out a bit
-    if include is None:
-        include = []
-    if exclude is None:
-        exclude = []
-
-    # Old, hacky method to exclude all tests was to
-    # request a test known to not exist, such as ''.
-    # If test '' was requested, short-circuit and return
-    # nothing immediately
-    if len(include) == 1 and '' in include:
-        return []
-
-    # Setup default Benchmarker using example configuration
-    if benchmarker is None:
-        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
-        config = ConfigParser.SafeConfigParser()
-        config.readfp(open(default_config))
-        defaults = dict(config.items("Defaults"))
-
-        # Convert strings into proper python types
-        for k,v in defaults.iteritems():
-            try:
-                defaults[k] = literal_eval(v)
-            except Exception:
-                pass
-
-        # Ensure we only run the __init__ method of Benchmarker
-        defaults['install'] = None
-        defaults['results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
-        defaults['results_environment'] = "My Server Environment"
-        defaults['test_dir'] = None
-        defaults['test_lang'] = None
-        defaults['quiet'] = True
-
-        benchmarker = Benchmarker(defaults)
-
-
-    # Search for configuration files
-    fwroot = setup_util.get_fwroot()
-    config_files = []
-
-    if benchmarker.test_lang:
-        benchmarker.test_dir = []
-        for lang in benchmarker.test_lang:
-            if os.path.exists("{!s}/frameworks/{!s}".format(fwroot, lang)):
-                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(fwroot, lang)):
-                    benchmarker.test_dir.append("{!s}/{!s}".format(lang, test_dir))
-            else:
-                raise Exception("Unable to locate language directory: {!s}".format(lang))
-
-    if benchmarker.test_dir:
-        for test_dir in benchmarker.test_dir:
-            dir_config_files = glob.glob("{!s}/frameworks/{!s}/benchmark_config.json".format(fwroot, test_dir))
-            if len(dir_config_files):
-                config_files.extend(dir_config_files)
-            else:
-                raise Exception("Unable to locate tests in test-dir: {!s}".format(test_dir))
-    else:
-        config_files.extend(glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(fwroot)))
-
-    tests = []
-    for config_file_name in config_files:
-        config = None
-        with open(config_file_name, 'r') as config_file:
-            try:
-                config = json.load(config_file)
-            except ValueError:
-                # User-friendly errors
-                print("Error loading '{!s}'.".format(config_file_name))
-                raise
-
-        # Find all tests in the config file
-        config_tests = framework_test.parse_config(config,
-                                                   os.path.dirname(config_file_name), benchmarker)
-
-        # Filter
-        for test in config_tests:
-            if len(include) is 0 and len(exclude) is 0:
-                # No filters, we are running everything
-                tests.append(test)
-            elif test.name in exclude:
-                continue
-            elif test.name in include:
-                tests.append(test)
-            else:
-                # An include list exists, but this test is
-                # not listed there, so we ignore it
-                pass
-
-    # Ensure we were able to locate everything that was
-    # explicitly included
-    if 0 != len(include):
-        names = {test.name for test in tests}
-        if 0 != len(set(include) - set(names)):
-            missing = list(set(include) - set(names))
-            raise Exception("Unable to locate tests %s" % missing)
-
-    tests.sort(key=lambda x: x.name)
-    return tests
-
-def gather_frameworks(include = [], exclude=[], benchmarker=None):
-    '''Return a dictionary mapping frameworks->[test1,test2,test3]
-    for quickly grabbing all tests in a grouped manner.
-    Args have the same meaning as gather_tests'''
-
-    tests = gather_tests(include, exclude, benchmarker)
-    frameworks = dict()
-
-    for test in tests:
-        if test.framework not in frameworks:
-            frameworks[test.framework] = []
-        frameworks[test.framework].append(test)
-    return frameworks
-
-def header(message, top='-', bottom='-'):
-    '''
-    Generates a clean header
-    '''
-    topheader = (top * 80)[:80]
-    bottomheader = (bottom * 80)[:80]
-    result = ""
-    if topheader != "":
-        result += "%s" % topheader
-    if message != "":
-        if result == "":
-            result = "  %s" % message
-        else:
-            result += "\n  %s" % message
-    if bottomheader != "":
-        if result == "":
-            result = "%s" % bottomheader
-        else:
-            result += "\n%s" % bottomheader
-    return result + '\n'
-
-def check_services(services):
-
-    def check_service(address, port):
-        try:
-            s = socket.socket()
-            s.settimeout(20)
-            s.connect((address, port))
-            return (True, "")
-        except Exception as ex:
-            return (False, ex)
-        finally:
-            s.close
-
-    res = []
-    for s in services:
-        r = check_service(s[1], s[2])
-        res.append((s[0], r[0], str(r[1])))
-    return res
-
-def verify_database_connections(services):
-    allGo = True
-    messages = []
-    for r in check_services(services):
-        if r[1]:
-            messages.append(r[0] + ": is GO!")
-        else:
-            messages.append(r[0] + ": is _NO_ GO!: ERROR: " + r[2])
-            allGo = False
-    return (allGo, messages)

+ 10 - 9
toolset/continuous/tasks/keep-logs.py

@@ -1,18 +1,16 @@
-#!/usr/bin/python
 #
-# Archives, to the specified folder, the logged output generated by a benchmark 
-# run.  
+# Archives, to the specified folder, the logged output generated by a benchmark
+# run.
 #
 # @author A. Shawn Bandy
 import os
 import zipfile
 import datetime
 import requests
-import shutil
 # Follows closely from:
 # http://stackoverflow.com/a/34153816
 #
-# Paths to the log folders are generated by TFB and where those files 
+# Paths to the log folders are generated by TFB and where those files
 # should be archived.
 #
 path_in = os.path.abspath(os.path.normpath(os.path.expanduser(os.path.join( \
@@ -24,15 +22,15 @@ path_out = os.path.abspath(os.path.join(os.environ['TFB_LOGSFOLDER'], \
     dt_folder))
 
 if not os.path.exists(path_out):
-  os.makedirs(path_out)
+    os.makedirs(path_out)
 
 zip_path = path_out + '/results.zip'
 
 zip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
 
 for root, dirs, files in os.walk(path_in):
-  for file in files:
-    zip_file.write(os.path.join(root, file))
+    for file in files:
+        zip_file.write(os.path.join(root, file))
 
 zip_file.close()
 
@@ -40,4 +38,7 @@ results_upload_uri = os.environ['TFB_UPLOADURI']
 
 if results_upload_uri != None:
     with open(zip_path, 'rb') as file_to_upload:
-        requests.post(results_upload_uri, headers={ 'Content-Type': 'application/zip' }, data=file_to_upload)
+        requests.post(
+            results_upload_uri,
+            headers={'Content-Type': 'application/zip'},
+            data=file_to_upload)

+ 1 - 1
toolset/continuous/tasks/run-benchmarks.sh

@@ -5,4 +5,4 @@
 echo Change to benchmark root
 cd $TFB_REPOPARENT/$TFB_REPONAME
 echo Running tests
-toolset/run-tests.py
+PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python toolset/run-tests.py

+ 1 - 1
toolset/continuous/tasks/run-tasks.sh

@@ -20,4 +20,4 @@ $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/run-benchmarks.sh
 #  Tasks after the run   #
 ##########################
 
-$TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/keep-logs.py
+PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/keep-logs.py

+ 0 - 105
toolset/initializer.py

@@ -1,105 +0,0 @@
-import subprocess, os
-from setup.linux import setup_util
-
-DEVNULL = open(os.devnull, 'w')
-
-def initialize(args):
-  fwroot = setup_util.get_fwroot()
-  dbuser = args.database_user
-  dbhost = args.database_host
-  dbiden = args.database_identity_file
-  cluser = args.client_user
-  clhost = args.client_host
-  cliden = args.client_identity_file
-  aphost = args.server_host
-
-  # test ssh connections to all the machines
-  client_conn = __check_connection(cluser, clhost, cliden, aphost)
-  database_conn = __check_connection(dbuser, dbhost, dbiden, aphost)
-
-  conn_success = client_conn and database_conn
-  if not conn_success and not args.quiet:
-    return __print_failure()
-  
-  # set up client machine
-  if not __init_client(fwroot, cluser, clhost, cliden, args.quiet) and not args.quiet:
-    return __print_failure()
-
-
-  # set up database software
-  if not __init_database(fwroot, dbuser, dbhost, dbiden, args.quiet) and not args.quiet:
-    return __print_failure()
-
-def __print_failure():
-  print("""
--------------------------------------------------------------------------------
-  This wizard is intended to help configure the required software on all the
-  machines in the ecosystem specified in benchmark.cfg.
-
-  Note: It is expected that you have already set up passwordless-sudo on all
-  of the machines (app, database, client) as well as identity file based 
-  authentication and hostname setup in your hosts file. 
-  More information on this required setup can be found at:
-
-  frameworkbenchmarks.readthedocs.io/en/latest/Development/Installation-Guide/
-
-  Please ensure that your benchmark.cfg is correctly configured as well as all
-  of the machines (app, database, client).
--------------------------------------------------------------------------------""")
-
-def __ssh_string(user, host, identity_file):
-  return ["ssh", "-T", "-o", "StrictHostKeyChecking=no", "%s@%s" % (user, host), "-i", identity_file]
-  
-def __check_connection(user, host, identity_file, app_host):
-  ''' 
-  Checks that the given user and host are accessible via ssh with the given
-  identity file and have the the following permissions:
-    1. passwordless sudo
-    2. ability to ssh back to app machine
-  '''
-  client_conn = True
-  try:
-    p = subprocess.Popen(__ssh_string(user, host, identity_file), 
-      stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
-    p.communicate("ssh -T -o StrictHostKeyChecking=no %s" % app_host)
-    if p.returncode:
-      client_conn = False
-  except Exception as e:
-    client_conn = False
-  return client_conn
-
-def __init_client(fwroot, user, host, identity_file, quiet):
-  '''
-  Initializes and configures the software required to run the suite on the 
-  client machine.
-  '''
-  if not quiet:
-    print("INSTALL: Installing client software")
-  with open (os.path.join(fwroot, "toolset", "setup", "linux", "client.sh"), "r") as myfile:
-    remote_script=myfile.read()
-    if quiet:
-      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
-        stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
-    else:
-      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
-        stdin=subprocess.PIPE)
-    p.communicate(remote_script)
-    return p.returncode == 0
-
-def __init_database(fwroot, user, host, identity_file, quiet):
-  '''
-  Initializes and configures the software required to run the suite on the
-  database machine.
-  '''
-  if not quiet:
-    print("INSTALL: Installing database software")
-  with open(os.path.join(fwroot, "toolset", "setup", "linux", "database.sh"), "r") as myfile:
-    remote_script=myfile.read()
-    if quiet:
-      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
-        stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL)
-    else:
-      p = subprocess.Popen(__ssh_string(user, host, identity_file), 
-        stdin=subprocess.PIPE)
-    p.communicate(remote_script)
-    return p.returncode == 0

+ 175 - 59
toolset/run-tests.py

@@ -1,37 +1,45 @@
-#!/usr/bin/env python
 import argparse
 import ConfigParser
 import socket
 import sys
-import time
 import os
 import platform
 import multiprocessing
-import itertools
-import copy
-from benchmark.benchmarker import Benchmarker
-from setup.linux.unbuffered import Unbuffered
-from setup.linux import setup_util
-from scaffolding import Scaffolding
-from initializer import initialize
+from toolset.benchmark.benchmarker import Benchmarker
+from toolset.utils import setup_util
+from toolset.utils.unbuffered import Unbuffered
+from toolset.utils.scaffolding import Scaffolding
+from toolset.utils.initializer import initialize
+from toolset.utils import cleaner
+from toolset.utils.results_helper import Results
+from toolset.utils.benchmark_config import BenchmarkConfig
+from toolset.utils import docker_helper
+from toolset.utils.metadata_helper import gather_tests
 from ast import literal_eval
 
 # Enable cross-platform colored output
 from colorama import init
 init()
 
+
 class StoreSeqAction(argparse.Action):
-    '''Helper class for parsing a sequence from the command line'''
+    '''
+    Helper class for parsing a sequence from the command line
+    '''
+
     def __init__(self, option_strings, dest, nargs=None, **kwargs):
-        super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs)
+        super(StoreSeqAction, self).__init__(
+            option_strings, dest, type=str, **kwargs)
+
     def __call__(self, parser, namespace, values, option_string=None):
         setattr(namespace, self.dest, self.parse_seq(values))
+
     def parse_seq(self, argument):
         result = argument.split(',')
         sequences = [x for x in result if ":" in x]
         for sequence in sequences:
             try:
-                (start,step,end) = sequence.split(':')
+                (start, step, end) = sequence.split(':')
             except ValueError:
                 print("  Invalid: {!s}".format(sequence))
                 print("  Requires start:step:end, e.g. 1:2:10")
@@ -45,7 +53,8 @@ class StoreSeqAction(argparse.Action):
 # Main
 ###################################################################################################
 def main(argv=None):
-    ''' Runs the program. There are three ways to pass arguments
+    '''
+    Runs the program. There are three ways to pass arguments
     1) environment variables TFB_*
     2) configuration file benchmark.cfg
     3) command line flags
@@ -57,7 +66,7 @@ def main(argv=None):
         argv = sys.argv
 
     # Enable unbuffered output so messages will appear in the proper order with subprocess output.
-    sys.stdout=Unbuffered(sys.stdout)
+    sys.stdout = Unbuffered(sys.stdout)
 
     # Update python environment
     # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported.
@@ -69,7 +78,9 @@ def main(argv=None):
     os.environ['FWROOT'] = setup_util.get_fwroot()
     os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
     # 'Ubuntu', '14.04', 'trusty' respectively
-    os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
+    os.environ['TFB_DISTRIB_ID'], os.environ[
+        'TFB_DISTRIB_RELEASE'], os.environ[
+            'TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
     # App server cpu count
     os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())
 
@@ -78,21 +89,29 @@ def main(argv=None):
         formatter_class=argparse.RawDescriptionHelpFormatter,
         add_help=False)
     conf_parser.add_argument(
-        '--conf_file', default='benchmark.cfg', metavar='FILE',
-        help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
+        '--conf_file',
+        default='benchmark.cfg',
+        metavar='FILE',
+        help=
+        'Optional configuration file to provide argument defaults. All config options can be overridden using the command line.'
+    )
     args, remaining_argv = conf_parser.parse_known_args()
 
     defaults = {}
     try:
-        if not os.path.exists(os.path.join(os.environ['FWROOT'], args.conf_file)) and not os.path.exists(os.path.join(os.environ['FWROOT'] + 'benchmark.cfg')):
+        if not os.path.exists(
+                os.path.join(
+                    os.environ['FWROOT'],
+                    args.conf_file)) and not os.path.exists(
+                        os.path.join(os.environ['FWROOT'] + 'benchmark.cfg')):
             print("No config file found. Aborting!")
             exit(1)
-        with open (os.path.join(os.environ['FWROOT'], args.conf_file)):
+        with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
             config = ConfigParser.SafeConfigParser()
             config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
             defaults.update(dict(config.items("Defaults")))
             # Convert strings into proper python types
-            for k, v in defaults.iteritems():
+            for k, v in defaults.items():
                 try:
                     defaults[k] = literal_eval(v)
                 except Exception:
@@ -126,10 +145,12 @@ def main(argv=None):
     ##########################################################
     # Set up argument parser
     ##########################################################
-    parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.",
-                                     parents=[conf_parser],
-                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-                                     epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
+    parser = argparse.ArgumentParser(
+        description="Install or run the Framework Benchmarks test suite.",
+        parents=[conf_parser],
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        epilog=
+        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
         Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
         values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
         list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
@@ -137,57 +158,152 @@ def main(argv=None):
         ''')
 
     # Install options
-    parser.add_argument('--init', action='store_true', default=False, help='Initializes the benchmark environment')
+    parser.add_argument(
+        '--init',
+        action='store_true',
+        default=False,
+        help='Initializes the benchmark environment')
 
     # Suite options
-    parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
-    parser.add_argument('--new', action='store_true', default=False, help='Initialize a new framework test')
-    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
-    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')
-    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
-    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
-    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
-    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
+    parser.add_argument(
+        '--build',
+        nargs='+',
+        help='Builds the dockerfile(s) for the given test(s)')
+    parser.add_argument(
+        '--clean',
+        action='store_true',
+        default=False,
+        help='Removes the results directory')
+    parser.add_argument(
+        '--new',
+        action='store_true',
+        default=False,
+        help='Initialize a new framework test')
+    parser.add_argument(
+        '-v',
+        '--verbose',
+        action='store_true',
+        default=False,
+        help=
+        'Causes the configuration to print before any other commands are executed.'
+    )
+    parser.add_argument(
+        '--quiet',
+        action='store_true',
+        default=False,
+        help=
+        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
+    )
+    parser.add_argument(
+        '--results-name',
+        help='Gives a name to this set of results, formatted as a date',
+        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
+    parser.add_argument(
+        '--results-environment',
+        help='Describes the environment in which these results were gathered',
+        default='(unspecified, hostname = %s)' % socket.gethostname())
+    parser.add_argument(
+        '--results-upload-uri',
+        default=None,
+        help=
+        'A URI where the in-progress results.json file will be POSTed periodically'
+    )
+    parser.add_argument(
+        '--parse',
+        help=
+        'Parses the results of the given timestamp and merges that with the latest results'
+    )
 
     # Test options
     parser.add_argument('--test', nargs='+', help='names of tests to run')
-    parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
-    parser.add_argument('--test-lang', nargs='+', dest='test_lang', help='name of language directory containing all tests to run')
-    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
-    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
-    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify', 'debug'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.')
-    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
+    parser.add_argument(
+        '--test-dir',
+        nargs='+',
+        dest='test_dir',
+        help='name of framework directory containing all tests to run')
+    parser.add_argument(
+        '--test-lang',
+        nargs='+',
+        dest='test_lang',
+        help='name of language directory containing all tests to run')
+    parser.add_argument(
+        '--exclude', nargs='+', help='names of tests to exclude')
+    parser.add_argument(
+        '--type',
+        choices=[
+            'all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update',
+            'plaintext'
+        ],
+        default='all',
+        help='which type of test to run')
+    parser.add_argument(
+        '-m',
+        '--mode',
+        choices=['benchmark', 'verify', 'debug'],
+        default='benchmark',
+        help=
+        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
+    )
+    parser.add_argument(
+        '--list-tests',
+        action='store_true',
+        default=False,
+        help='lists all the known tests that can run')
 
     # Benchmark options
-    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
-    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
+    parser.add_argument(
+        '--duration',
+        default=15,
+        help='Time in seconds that each test should run for.')
+    parser.add_argument(
+        '--sleep',
+        type=int,
+        default=60,
+        help=
+        'the amount of time to sleep after starting each test to allow the server to start up.'
+    )
 
-    parser.set_defaults(**defaults) # Must do this after add, or each option's default will override the configuration file default
+    parser.set_defaults(**defaults)
+    # Must do this after add, or each option's default will override the configuration file default
     args = parser.parse_args(remaining_argv)
 
-    if args.new:
-        Scaffolding().scaffold()
-        return 0
+    config = BenchmarkConfig(vars(args))
+    results = Results(config)
+
+    if config.new:
+        Scaffolding()
 
-    if args.init:
-        initialize(args)
-        return 0
+    elif config.init:
+        initialize(config)
 
-    benchmarker = Benchmarker(vars(args))
+    elif config.build:
+        docker_helper.build(config, config.build, None)
 
-    if args.clean:
-        benchmarker.clean_all()
-        return 0
+    elif config.clean:
+        cleaner.clean(results)
+        docker_helper.clean()
+
+    elif config.list_tests:
+        all_tests = gather_tests(benchmarker_config=config)
+
+        for test in all_tests:
+            print(test.name)
+
+    elif config.parse != None:
+        # TODO: broken
+        all_tests = gather_tests(benchmarker_config=config)
+
+        for test in all_tests:
+            test.parse_all()
+
+        results.parse(all_tests)
 
-    # Run the benchmarker in the specified mode
-    #   Do not use benchmarker variables for these checks,
-    #   they are either str or bool based on the python version
-    if args.list_tests:
-        benchmarker.run_list_tests()
-    elif args.parse != None:
-        benchmarker.parse_timestamp()
     else:
+        benchmarker = Benchmarker(config, results)
         return benchmarker.run()
 
+    return 0
+
+
 if __name__ == "__main__":
     sys.exit(main())
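The argparse epilog above documents the `(type int-sequence)` syntax that `StoreSeqAction` feeds through `parse_seq`. The sketch below is a stand-alone restatement of that expansion; the function name is hypothetical, and the real action additionally prints a usage hint for malformed start:step:end tokens, as the first hunk shows:

def parse_int_sequence(argument):
    # Restates the documented int-sequence forms:
    # "5" -> [5], "1,3,6" -> [1, 3, 6], "1:3:15" -> range(1, 15, 3).
    values = []
    for token in argument.split(','):
        if ':' in token:
            start, step, end = token.split(':')
            values.extend(range(int(start), int(end), int(step)))
        else:
            values.append(int(token))
    return values

assert parse_int_sequence('5') == [5]
assert parse_int_sequence('1,3,6') == [1, 3, 6]
assert parse_int_sequence('1:3:15') == [1, 4, 7, 10, 13]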

+ 0 - 348
toolset/scaffolding.py

@@ -1,348 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import sys
-import os
-import imp
-from shutil import copytree
-from setup.linux.setup_util import replace_text
-from benchmark.utils import gather_frameworks, gather_langauges
-
-class Scaffolding:
-  def scaffold(self):
-    print("""
--------------------------------------------------------------------------------
-    This wizard is intended to help build the scaffolding required for a new 
-    test to be benchmarked.
-
-    From here, you will be prompted for values related to the test you
-    wish to add.
--------------------------------------------------------------------------------""")
-
-    try:
-      self.__gather_display_name()
-      self.__gather_language()
-      self.__gather_approach()
-      self.__gather_classification()
-      self.__gather_orm()
-      self.__gather_webserver()
-      self.__gather_versus()
-      self.__confirm_values()
-      self.__print_success()
-    except:
-      print("")
-
-  def __gather_display_name(self):
-    print("""
-  The name of your test as you wish it to be displayed on the results page.
-
-  Example: Gemini, Gin, Express
-    """)
-    self.__prompt_display_name()
-    while not self.display_name:
-      self.__prompt_display_name()
-    self.name = self.display_name.lower()
-
-  def __prompt_display_name(self):
-    self.display_name = raw_input("Name: ").strip()
-
-    found = False
-    for framework in gather_frameworks():
-      if framework.lower() == self.display_name.lower():
-        found = True
-
-    if found:
-      print("""
-  It appears that there is already a '%s' framework in the test suite. You will
-  have to pick a different name.
-      """ % self.display_name)
-      self.display_name = None
-
-  def __gather_language(self):
-    print("""
-  The language in which your test implementation is written.
-
-  Example: Java, Go, PHP
-    """)
-    self.language = None
-    while not self.language:
-      self.__prompt_language()
-
-  def __prompt_language(self):
-    self.language = raw_input("Language: ").strip()
-
-    known_languages = gather_langauges()
-    language = None
-    for lang in known_languages:
-      if lang.lower() == self.language.lower():
-        language = lang
-
-    if not language:
-      similar = []
-      for lang in known_languages:
-        if lang.lower()[:1] == self.language.lower()[:1]:
-          similar.append(lang)
-      similar = ', '.join(similar)
-
-      print("""
-  That language is not currently in our list of known languages.
-  
-  Here is a list of similar languages present in our benchmark suite that you
-  may have meant:
-
-  %s
-      
-  Did you mean to add the new language, '%s', to the benchmark suite?
-      """ % (similar, self.language))
-      valid = self.__prompt_confirm_new_language(known_languages)
-      while not valid:
-        valid = self.__prompt_confirm_new_language(known_languages)
-
-      if self.confirm_new_lang == 'n':
-        self.language = None
-      else:
-        self.language = self.language.title()
-
-    return self.language
-
-  def __prompt_confirm_new_language(self, known_languages):
-    self.confirm_new_lang = raw_input("Create New Language '%s' (y/n): " % self.language).strip().lower()
-    return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
-
-  def __gather_approach(self):
-    print("""
-  The approach of your test implementation.
-
-  1) Realistic: Uses the framework with most out-of-the-box functionality 
-                enabled. We consider this realistic because most applications 
-                built with the framework will leave these features enabled.
-  2) Stripped:  Removes or outright avoids implementing features that are
-                unnecessary for the particulars of the benchmark exercise. This
-                might illuminate the marginal improvement available in fine-
-                tuning a framework to your application's use-case.
-
-  Note: If you are unsure, then your approach is probably Realistic. The
-        Stripped approach is seldom used and will not have results displayed
-        by default on the results website.
-    """)
-    valid = self.__prompt_approach()
-    while not valid:
-      valid = self.__prompt_approach()
-
-  def __prompt_approach(self):
-    self.approach = raw_input("Approach [1/2]: ").strip()
-    if self.approach == '1':
-      self.approach = 'Realistic'
-    if self.approach == '2':
-      self.approach = 'Stripped'
-    return self.approach == 'Realistic' or self.approach == 'Stripped'
-
-  def __gather_classification(self):
-    print("""
-  The classification of your test implementation.
-
-  1) Fullstack: Robust framework expected to provide high-level functionality 
-                for serving as a web application; for example, ability to 
-                compose views, provide functions for responding with several 
-                data types (json, html, etc), connecting to a database, form 
-                processing, etc.
-  2) Micro:     Simple framework expected to provide enough middleware to build
-                a robust web application such as request routing and some 
-                simple plumbing, but may not include built-in functionality 
-                such as, for example, server-composed views.
-  3) Platform:  Barebones infrastructure for servicing HTTP requests, but does
-                not include a framework at all.
-    """)
-    valid = self.__prompt_classification()
-    while not valid:
-      valid = self.__prompt_classification()
-    if self.classification == 'Platform':
-      self.platform = 'None'
-      self.framework = 'None'
-    else:
-      self.framework = self.display_name
-      self.__gather_platform()
-
-  def __prompt_classification(self):
-    self.classification = raw_input("Classification [1/2/3]: ").strip()
-    if self.classification == '1':
-      self.classification = 'Fullstack'
-    if self.classification == '2':
-      self.classification = 'Micro'
-    if self.classification == '3':
-      self.classification = 'Platform'
-    return self.classification == 'Fullstack' or \
-           self.classification == 'Micro' or \
-           self.classification == 'Platform'
-
-  def __gather_platform(self):
-    print("""
-  The platform of your test implementation.
-
-  The platform is the low-level software or API used to host web applications 
-  for the framework; the platform provides an implementation of the HTTP
-  fundamentals.
-
-  Not all frameworks have a platform and if your programming language provides
-  much of that by which we define a platform, leave black.
-
-  Example: Servlet, Wai, .NET
-    """)
-    self.__prompt_platform()
-    
-  def __prompt_platform(self):
-    self.platform = raw_input("Platform (optional): ").strip()
-    if self.platform == '':
-      self.platform = 'None'
-
-  def __gather_orm(self):
-    print("""
-  How you would classify the ORM (object relational mapper) of your test?
-
-  1) Full:  A feature-rich ORM which provides functionality for interacting 
-            with a database without writing a query in all but the most edge 
-            cases.
-  2) Micro: An ORM which provides functionality for interacting with a database
-            for many trivial operations (querying, updating), but not more 
-            robust cases (for example, gathering relations).
-  3) Raw:   No ORM; raw database access.
-    """)
-    valid = self.__prompt_orm()
-    while not valid:
-      valid = self.__prompt_orm()
-
-  def __prompt_orm(self):
-    self.orm = raw_input("ORM [1/2/3]: ").strip()
-    if self.orm == '1':
-      self.orm = 'Full'
-    if self.orm == '2':
-      self.orm = 'Micro'
-    if self.orm == '3':
-      self.orm = 'Raw'
-    return self.orm == 'Full' or \
-           self.orm == 'Micro' or \
-           self.orm == 'Raw'
-
-  def __gather_webserver(self):
-    print("""
-  Name of the front-end webserver sitting in front of your test implementation.
-
-  Your test implementation may not use a web-server and may act as its own; you
-  can leave this blank in this case.
-
-  Example: nginx, Meinheld, httplight
-    """)
-    self.__prompt_webserver()
-
-  def __prompt_webserver(self):
-    self.webserver = raw_input("Webserver (optional): ").strip()
-    if self.webserver == '':
-      self.webserver = 'None'
-
-  def __gather_versus(self):
-    print("""
-  The name of another test (elsewhere in this project) that is a subset of this
-  framework.
-  This allows for the generation of the framework efficiency chart in the 
-  results web site.
-  For example, Compojure is compared to "servlet" since Compojure is built on 
-  the Servlet platform.
-
-  Example: Servlet, Wai, Undertow
-    """)
-    self.__prompt_versus()
-
-  def __prompt_versus(self):
-    self.versus = raw_input("Versus (optional): ").strip()
-    if self.versus == '':
-      self.versus = 'None'
-
-  def __confirm_values(self):
-    print("""
-    Name: %s
-    Language: %s
-    Approach: %s
-    Classification: %s
-    Platform: %s
-    ORM: %s
-    Webserver: %s
-    Versus: %s
-
-  Finalize the initialization of your test given the above values?
-
-  Note: once you have initialized your test, you can change these values later.
-    """ % (self.display_name, 
-           self.language, 
-           self.approach, 
-           self.classification, 
-           self.platform,
-           self.orm, 
-           self.webserver, 
-           self.versus))
-
-    valid = self.__prompt_confirmation()
-    while not valid:
-      valid = self.__prompt_confirmation()
-
-    if self.confirmation == 'y':
-      self.__build_scaffolding()
-    else:
-      print('Aborting')
-
-  def __prompt_confirmation(self):
-    self.confirmation = raw_input("Initialize [y/n]: ").strip().lower()
-    return self.confirmation == 'y' or self.confirmation == 'n'
-
-  def __build_scaffolding(self):
-    if self.__create_test_folder():
-      self.__copy_scaffold_files()
-      self.__edit_scaffold_files()
-
-  def __create_test_folder(self):
-    self.language_dir = os.path.join("frameworks", self.language)
-    self.test_dir = os.path.join(self.language_dir, self.name)
-
-    if os.path.exists(self.test_dir):
-      print("Test '%s' already exists; aborting." % self.name)
-      return False
-
-    return True
-
-  def __copy_scaffold_files(self):
-    self.scaffold_dir = os.path.join("toolset","setup","scaffolding")
-    copytree(self.scaffold_dir, self.test_dir)
-
-  def __edit_scaffold_files(self):
-    for file in os.listdir(os.path.join(self.test_dir)):
-      replace_text(os.path.join(self.test_dir, file), "\$NAME", self.name)
-      replace_text(os.path.join(self.test_dir, file), "\$DISPLAY_NAME", self.display_name)
-      replace_text(os.path.join(self.test_dir, file), "\$APPROACH", self.approach)
-      replace_text(os.path.join(self.test_dir, file), "\$CLASSIFICATION", self.classification)
-      replace_text(os.path.join(self.test_dir, file), "\$FRAMEWORK", self.framework)
-      replace_text(os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
-      replace_text(os.path.join(self.test_dir, file), "\$ORM", self.orm)
-      replace_text(os.path.join(self.test_dir, file), "\$PLATFORM", self.platform)
-      replace_text(os.path.join(self.test_dir, file), "\$WEBSERVER", self.webserver)
-      replace_text(os.path.join(self.test_dir, file), "\$VERSUS", self.versus)
-
-  def __print_success(self):
-    print("""
--------------------------------------------------------------------------------
-  Success!
-
-  Your new test structure has been built to the sepcifications of the suite.
-  Here is a brief run-down of what has been built:
-
-    frameworks
-        └─── %s
-              └─── %s
-                    ├─── .gitignore
-                    ├─── benchmark_config.json
-                    ├─── README.md
-                    ├─── setup.sh
-                    ├─── setup_mysql.sh
-                    └─── source_code
-
-  The next step is to read through your README.md and follow the instructions
-  provided therein.
--------------------------------------------------------------------------------"""
-    % (self.language, self.name))

+ 0 - 1
toolset/setup/__init__.py

@@ -1 +0,0 @@
-# Setup

+ 2 - 0
toolset/setup/linux/docker/base.dockerfile → toolset/setup/docker/base.dockerfile

@@ -24,6 +24,8 @@ ENV LC_ALL en_US.UTF-8
 
 ARG CPU_COUNT
 ARG MAX_CONCURRENCY
+ARG TFB_DATABASE
 
 ENV CPU_COUNT=$CPU_COUNT
 ENV MAX_CONCURRENCY=$MAX_CONCURRENCY
+ENV DBHOST=$TFB_DATABASE
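These two added lines thread the database hostname into every image at build time: TFB_DATABASE arrives as a Docker build argument and is exposed to test processes as DBHOST. As an illustration only (the toolset's own docker_helper.build call is not shown in this diff, so the context path and tag below are placeholders), the argument could be supplied through the Docker SDK for Python like this:

import docker

client = docker.from_env()
client.images.build(
    path='toolset/setup/docker',         # build context, placeholder
    dockerfile='base.dockerfile',
    buildargs={
        'TFB_DATABASE': 'tfb-database',  # surfaces inside the image as DBHOST
        'CPU_COUNT': '4',
        'MAX_CONCURRENCY': '256',
    },
    tag='techempower/base')              # placeholder tag

The command-line equivalent passes `--build-arg TFB_DATABASE=...` to `docker build`.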

+ 0 - 0
toolset/setup/linux/docker/databases/mongodb/create.js → toolset/setup/docker/databases/mongodb/create.js


+ 0 - 0
toolset/setup/linux/docker/databases/mongodb/mongodb.dockerfile → toolset/setup/docker/databases/mongodb/mongodb.dockerfile


+ 0 - 0
toolset/setup/linux/docker/databases/mysql/create.sql → toolset/setup/docker/databases/mysql/create.sql


+ 0 - 0
toolset/setup/linux/docker/databases/mysql/my.cnf → toolset/setup/docker/databases/mysql/my.cnf


+ 0 - 0
toolset/setup/linux/docker/databases/mysql/mysql.dockerfile → toolset/setup/docker/databases/mysql/mysql.dockerfile


+ 0 - 0
toolset/setup/linux/docker/databases/mysql/mysql.list → toolset/setup/docker/databases/mysql/mysql.list


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/60-postgresql-shm.conf → toolset/setup/docker/databases/postgres/60-postgresql-shm.conf


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/create-postgres-database.sql → toolset/setup/docker/databases/postgres/create-postgres-database.sql


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/create-postgres.sql → toolset/setup/docker/databases/postgres/create-postgres.sql


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/pg_hba.conf → toolset/setup/docker/databases/postgres/pg_hba.conf


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/postgres.dockerfile → toolset/setup/docker/databases/postgres/postgres.dockerfile


+ 0 - 0
toolset/setup/linux/docker/databases/postgres/postgresql.conf → toolset/setup/docker/databases/postgres/postgresql.conf


+ 0 - 0
toolset/setup/linux/docker/languages/d-lang.dockerfile → toolset/setup/docker/languages/d-lang.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/dart-lang.dockerfile → toolset/setup/docker/languages/dart-lang.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/elixir.dockerfile → toolset/setup/docker/languages/elixir.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/erlang.dockerfile → toolset/setup/docker/languages/erlang.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/haskell.dockerfile → toolset/setup/docker/languages/haskell.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/java.dockerfile → toolset/setup/docker/languages/java.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/java8.dockerfile → toolset/setup/docker/languages/java8.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/lua.dockerfile → toolset/setup/docker/languages/lua.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/nim.dockerfile → toolset/setup/docker/languages/nim.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/ruby-2.4.dockerfile → toolset/setup/docker/languages/ruby-2.4.dockerfile


+ 0 - 0
toolset/setup/linux/docker/languages/rust.dockerfile → toolset/setup/docker/languages/rust.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/ant.dockerfile → toolset/setup/docker/systools/ant.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/gcc-6.dockerfile → toolset/setup/docker/systools/gcc-6.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/luarocks.dockerfile → toolset/setup/docker/systools/luarocks.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/maven-java8.dockerfile → toolset/setup/docker/systools/maven-java8.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/maven-settings.xml → toolset/setup/docker/systools/maven-settings.xml


+ 0 - 0
toolset/setup/linux/docker/systools/maven.dockerfile → toolset/setup/docker/systools/maven.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/nimble.dockerfile → toolset/setup/docker/systools/nimble.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/sbt-java8.dockerfile → toolset/setup/docker/systools/sbt-java8.dockerfile


+ 0 - 0
toolset/setup/linux/docker/systools/sbt.dockerfile → toolset/setup/docker/systools/sbt.dockerfile


+ 0 - 0
toolset/setup/linux/docker/webservers/nginx.dockerfile → toolset/setup/docker/webservers/nginx.dockerfile


+ 0 - 0
toolset/setup/linux/docker/webservers/nodejs8.dockerfile → toolset/setup/docker/webservers/nodejs8.dockerfile


+ 0 - 0
toolset/setup/linux/docker/webservers/openresty-server.dockerfile → toolset/setup/docker/webservers/openresty-server.dockerfile


+ 0 - 0
toolset/setup/linux/docker/webservers/resin-java8.dockerfile → toolset/setup/docker/webservers/resin-java8.dockerfile


+ 0 - 0
toolset/setup/linux/docker/webservers/resin.dockerfile → toolset/setup/docker/webservers/resin.dockerfile


+ 0 - 1
toolset/setup/linux/__init__.py

@@ -1 +0,0 @@
-# Linux server and client setup

+ 1 - 0
toolset/setup/linux/database.sh

@@ -29,6 +29,7 @@ sudo add-apt-repository \
    $(lsb_release -cs) \
    stable"
 sudo apt-get update
+# TODO: this doesn't work on real machines; only single-server setups which conveniently have docker already
 sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"  docker-ce
 if ! sudo grep -q -E "^docker:" /etc/group; then
   sudo groupadd docker

+ 3 - 21
toolset/setup/linux/prerequisites.sh

@@ -22,27 +22,8 @@ sudo apt-get -qqy install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options:
   ca-certificates               `# Needed for Docker on Ubuntu 14` \
   curl                          `# Needed for Docker on Ubuntu 14` \
   software-properties-common    `# Needed for Docker on Ubuntu 14`
-  # cmake build-essential automake    `# Needed for building code` \
-  # wget unzip                   `# Common tools` \
-  # mercurial                `# Version control systems` \
-  # libpcre3 libpcre3-dev libpcrecpp0 `# Regular expression support` \
-  # libssl-dev libcurl4-openssl-dev   `# SSL libraries` \
-  # zlib1g-dev \
-  # libreadline6-dev \
-  # libbz2-dev \
-  # libyaml-dev libxml2-dev \
-  # libxslt-dev libgdbm-dev ncurses-dev  \
-  # libffi-dev htop libtool bison libevent-dev \
-  # libgstreamer-plugins-base0.10-0 libgstreamer0.10-0 \
-  # liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev \
-  # libjson0-dev libmcrypt-dev libicu-dev gettext \
-  #  mlton \
-  # re2c libnuma-dev
 
 
-sudo pip install colorama==0.3.1
-# Version 2.3 has a nice Counter() and other features
-# but it requires --allow-external and --allow-unverified
-sudo pip install progressbar==2.2 requests MySQL-python psycopg2 pymongo
+sudo pip install colorama==0.3.1 requests MySQL-python psycopg2-binary pymongo
 
 
 #
 #
 # Install Docker
 # Install Docker
@@ -76,10 +57,11 @@ sudo sh -c "echo '*            soft    rtprio             99' >> /etc/security/l
 
 
 # Create a tfb command alias for running the toolset
 # Create a tfb command alias for running the toolset
 # For now, this still ensures you have to be in the framework root to run it
 # For now, this still ensures you have to be in the framework root to run it
+export PWD=$(pwd)
 sudo tee /etc/profile.d/tfb.sh <<EOF
 sudo tee /etc/profile.d/tfb.sh <<EOF
 #!/bin/bash
 #!/bin/bash
 tfb() {
 tfb() {
-  $(pwd)/toolset/run-tests.py "\$@"
+  PYTHONPATH=$PWD python $PWD/toolset/run-tests.py "\$@"
 }
 }
 EOF
 EOF
 source /etc/profile.d/tfb.sh
 source /etc/profile.d/tfb.sh
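
Note: with this change the tfb alias no longer executes run-tests.py directly. A command such as tfb --init --quiet (used by travis_setup.sh further down) now expands to roughly PYTHONPATH=<repo root> python <repo root>/toolset/run-tests.py --init --quiet, where <repo root> is whatever directory prerequisites.sh was sourced from; the placeholder path is illustrative, not part of the change.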

+ 0 - 13
toolset/setup/linux/unbuffered.py

@@ -1,13 +0,0 @@
-# Wrapper for unbuffered stream writing.
-# http://stackoverflow.com/a/107717/376366
-# Used to make sure print output appears in the correct order
-# in log files when spawning subprocesses.
-
-class Unbuffered:
-  def __init__(self, stream):
-    self.stream = stream
-  def write(self, data):
-    self.stream.write(data)
-    self.stream.flush()
-  def __getattr__(self, attr):
-    return getattr(self.stream, attr)

+ 0 - 25
toolset/setup/sqlserver/setup-sqlserver-bootstrap.ps1

@@ -1,25 +0,0 @@
-# To download and run this script, open an elevated Command Prompt and then run:
-#
-# powershell -ExecutionPolicy Bypass -Command "iex (New-Object Net.WebClient).DownloadString('https://raw.github.com/TechEmpower/FrameworkBenchmarks/master/toolset/setup/sqlserver/setup-sqlserver-bootstrap.ps1')"
-
-$basedir = "C:\FrameworkBenchmarks"
-$rawRepo = "https://raw.github.com/TechEmpower/FrameworkBenchmarks/master"
-
-$config_url = $rawRepo + "/config"
-$config_local = $basedir + "\config"
-$setup_sqlserver_url = $rawRepo + "/toolset/setup/sqlserver/setup-sqlserver.ps1"
-$setup_sqlserver_local = $basedir + "\setup-sqlserver.ps1"
-$create_sqlserver_login_and_database_url = $config_url + "/create-sqlserver-login-and-database.sql"
-$create_sqlserver_login_and_database_local = $config_local + "/create-sqlserver-login-and-database.sql"
-$create_sqlserver_url = $config_url + "/create-sqlserver.sql"
-$create_sqlserver_local = $config_local + "/create-sqlserver.sql"
-
-Write-Host "Creating directory: $config_local`n"
-New-Item -Path $config_local -Type Directory -Force | Out-Null
-
-Write-Host "Downloading setup files...`n"
-(New-Object System.Net.WebClient).DownloadFile($setup_sqlserver_url, $setup_sqlserver_local)
-(New-Object System.Net.WebClient).DownloadFile($create_sqlserver_login_and_database_url, $create_sqlserver_login_and_database_local)
-(New-Object System.Net.WebClient).DownloadFile($create_sqlserver_url, $create_sqlserver_local)
-
-powershell -ExecutionPolicy Bypass -File $setup_sqlserver_local

+ 0 - 91
toolset/setup/sqlserver/setup-sqlserver.ps1

@@ -1,91 +0,0 @@
-# This script downloads and installs SQL Server and opens it on port 1433.
-#
-# To run this script, run an elevated Command Prompt and enter:
-#
-# powershell -ExecutionPolicy Bypass -File <this script's filename>
-
-$basedir = "C:\FrameworkBenchmarks"
-$workdir = "$basedir\installs"
-New-Item -Path $workdir -Type directory -Force | Out-Null
-
-If (-Not (Get-Service | ? Name -Eq "MSSQLSERVER")) {
-
-  Write-Host "Could not find default SQL Server instance, MSSQLSERVER."
-  Write-Host "Downloading SQL Server (several GBs)..."
-
-  # URLs from http://www.microsoft.com/en-us/download/details.aspx?id=35575
-
-  $sqlserver_exe_url = "http://download.microsoft.com/download/3/B/D/3BD9DD65-D3E3-43C3-BB50-0ED850A82AD5/SQLServer2012SP1-FullSlipstream-x64-ENU.exe"
-  $sqlserver_exe_local = "$workdir\SQLServer2012SP1-FullSlipstream-x64-ENU.exe"
-  (New-Object System.Net.WebClient).DownloadFile($sqlserver_exe_url, $sqlserver_exe_local)
-
-  $sqlserver_box_url = "http://download.microsoft.com/download/3/B/D/3BD9DD65-D3E3-43C3-BB50-0ED850A82AD5/SQLServer2012SP1-FullSlipstream-x64-ENU.box"
-  $sqlserver_box_local = "$workdir\SQLServer2012SP1-FullSlipstream-x64-ENU.box"
-  (New-Object System.Net.WebClient).DownloadFile($sqlserver_box_url, $sqlserver_box_local)
-
-  Write-Host "Installing SQL Server..."
-
-  # Install only the SQL Server database engine.
-  # Use a default instance name.
-  # Make %COMPUTERNAME%\Administrators have administrative rights.
-  
-  # The following is not used because this is done in PowerShell below.
-  #   /securitymode=SQL /sapwd=S3cr3tS3cr3t /TCPENABLED=1
-  #   Allow Windows Authentication or old-style SQL authentication.
-  #   The password of the sa account is specified.
-  #   SQL Server will be listening on TCP port 1433.
-  #
-  Start-Process "$sqlserver_exe_local" "/q /action=install /features=SQLEngine /INSTANCENAME=MSSQLSERVER /SQLSYSADMINACCOUNTS=Administrators /IACCEPTSQLSERVERLICENSETERMS" -Wait
-}
-
-# In case we just installed SQL Server and the environment variables haven't been refreshed, manually
-# refresh PSModulePath so that Import-Module sqlps will work.
-
-$env:PSModulePath = [Environment]::GetEnvironmentVariable("PSModulePath", [System.EnvironmentVariableTarget]::Machine)
-
-Import-Module sqlps
-
-Write-Host "Setting SQL Server to start on boot..."
-
-Set-Service MSSQLSERVER -StartupType Automatic
-
-Write-Host "Ensuring that SQL Server is started..."
-
-Start-Service MSSQLSERVER
-
-Write-Host "Enabling SQL authentication..."
-
-# Enable SQL authentication
-$s = New-Object ('Microsoft.SqlServer.Management.Smo.Server')
-$s.Settings.LoginMode = [Microsoft.SqlServer.Management.SMO.ServerLoginMode]::Mixed
-$s.Alter()
-
-Write-Host "Configuring SQL Server to listen on TCP (default port 1433)..."
-
-# Enable the TCP protocol on the default instance.
-$wmi = New-Object ('Microsoft.SqlServer.Management.Smo.Wmi.ManagedComputer')
-$uri = "ManagedComputer[@Name='" + (Get-Content env:computername) + "']/ ServerInstance[@Name='MSSQLSERVER']/ServerProtocol[@Name='Tcp']"
-$Tcp = $wmi.GetSmoObject($uri)
-$Tcp.IsEnabled = $true
-$Tcp.Alter()
-
-Write-Host "Restarting SQL Server..."
-
-Restart-Service -Name MSSQLSERVER
-
-If (-Not (Get-NetFirewallPortFilter | ? LocalPort -Eq "1433")) {
-  Write-Host "Opening port 1433 in firewall..."
-  New-NetFirewallRule -DisplayName "SQL 1433" -Action Allow -Direction Inbound -LocalPort 1433 -Protocol TCP | Out-Null
-} else {
-  Write-Host "Port 1433 is already configured in firewall."
-}
-
-Write-Host "Creating SQL Server login and populated database..."
-
-# Connect with Windows Authentication, assuming that we have access.
-Invoke-Sqlcmd -InputFile "$basedir\config\create-sqlserver-login-and-database.sql" -OutputSqlErrors $True -QueryTimeout 180
-
-# Now that benchmarkdbuser has been created, we can connect with those credentials.
-Invoke-Sqlcmd -Username benchmarkdbuser -Password B3nchmarkDBPass -Database hello_world -InputFile "$basedir\config\create-sqlserver.sql" -OutputSqlErrors $True -QueryTimeout 180
-
-Write-Host "Done."

+ 0 - 28
toolset/setup/windows/databases/sqlserver/create-sqlserver-login-and-database.sql

@@ -1,28 +0,0 @@
--- This SQL Server T-SQL script creates the database user and hello_world database.
---
--- To run this script, login to an administrator account in Windows, open a command prompt and run:
---
--- "%ProgramFiles%\Microsoft SQL Server\110\Tools\binn\sqlcmd.exe" -i <filename of this file>
---
-
-IF EXISTS (SELECT * FROM sys.server_principals WHERE name = 'benchmarkdbuser')
-    DROP LOGIN benchmarkdbuser
-GO
-
--- This password has mixed-case and a number to satisfy the Windows password policy
-CREATE LOGIN benchmarkdbuser WITH PASSWORD = 'B3nchmarkDBPass'
-GO
-
-IF EXISTS(SELECT * FROM SYS.DATABASES WHERE NAME='hello_world')
-    DROP DATABASE hello_world
-GO
-
-CREATE DATABASE hello_world
-GO
-USE hello_world
-GO
-
--- Give this user total power over the database
-CREATE USER benchmarkdbuser FOR LOGIN benchmarkdbuser
-EXEC sp_addrolemember 'db_owner', 'benchmarkdbuser'
-GO

+ 0 - 55
toolset/setup/windows/databases/sqlserver/create-sqlserver.sql

@@ -1,55 +0,0 @@
--- This SQL Server T-SQL script creates and populates the World and Fortune tables.
---
--- To run this script, make sure that you've already run create-sqlserver-login-and-database.sql
--- to create the database user and database, then open a command prompt and run:
---
--- "%ProgramFiles%\Microsoft SQL Server\110\Tools\binn\sqlcmd.exe" -U benchmarkdbuser -P B3nchmarkDBPass -d hello_world -i <filename of this file>
-
-IF OBJECT_ID('World', 'U') IS NOT NULL
-    DROP TABLE World
-GO
-
-CREATE TABLE World (
-  id int NOT NULL IDENTITY PRIMARY KEY,
-  randomNumber int NOT NULL default 0
-)
-GO
-
--- Populate World table
-DECLARE @RowCount INT
-DECLARE @Random INT
-SET @RowCount = 0
-
-WHILE @RowCount < 10000
-BEGIN
-	SELECT @Random = ((10000 + 1) - 1) * RAND() + 1
-	INSERT INTO World (randomNumber) VALUES (@Random)
-	SET @RowCount = @RowCount + 1
-END
-
-GO
-
-IF OBJECT_ID('Fortune', 'U') IS NOT NULL
-    DROP TABLE Fortune
-GO
-
--- Note that this uses nvarchar to make sure that the column is Unicode.
-CREATE TABLE Fortune (
-  id int NOT NULL IDENTITY PRIMARY KEY,
-  message nvarchar(2048) NOT NULL
-)
-GO
-
-INSERT INTO Fortune (message) VALUES (N'fortune: No such file or directory');
-INSERT INTO Fortune (message) VALUES (N'A computer scientist is someone who fixes things that aren''t broken.');
-INSERT INTO Fortune (message) VALUES (N'After enough decimal places, nobody gives a damn.');
-INSERT INTO Fortune (message) VALUES (N'A bad random number generator: 1, 1, 1, 1, 1, 4.33e+67, 1, 1, 1');
-INSERT INTO Fortune (message) VALUES (N'A computer program does what you tell it to do, not what you want it to do.');
-INSERT INTO Fortune (message) VALUES (N'Emacs is a nice operating system, but I prefer UNIX. — Tom Christaensen');
-INSERT INTO Fortune (message) VALUES (N'Any program that runs right is obsolete.');
-INSERT INTO Fortune (message) VALUES (N'A list is only as strong as its weakest link. — Donald Knuth');
-INSERT INTO Fortune (message) VALUES (N'Feature: A bug with seniority.');
-INSERT INTO Fortune (message) VALUES (N'Computers make very fast, very accurate mistakes.');
-INSERT INTO Fortune (message) VALUES (N'<script>alert("This should not be displayed in a browser alert box.");</script>');
-INSERT INTO Fortune (message) VALUES (N'フレームワークのベンチマーク');
-GO

+ 0 - 42
toolset/setup/windows/installer-bootstrap.ps1

@@ -1,42 +0,0 @@
-param($noexit)
-
-if (!$noexit) {
-    Write-Host "`nRestarting PowerShell with -NoExit...`n"
-    powershell -NoExit -File $MyInvocation.MyCommand.Path 1
-    return
-}
-
-$basedir = "C:\FrameworkBenchmarks"
-$workdir = $basedir + "\installs"
-
-$repo = "https://github.com/TechEmpower/FrameworkBenchmarks"
-$installer = $basedir + "\toolset\setup\windows\installer.ps1"
-
-$git = "C:\Git\bin\git.exe"
-$gitinstaller_file = "Git-1.8.1.2-preview20130201.exe"
-$gitinstaller_url = "https://msysgit.googlecode.com/files/" + $gitinstaller_file
-$gitinstaller_local = $workdir + "\" + $gitinstaller_file
-
-Write-Host "Creating work directory: $workdir `n"
-New-Item -Path $workdir -Type Directory -Force | Out-Null
-
-Write-Host "Downloading git...`n"
-(New-Object System.Net.WebClient).DownloadFile($gitinstaller_url, $gitinstaller_local)
-
-Write-Host "Installing git...`n"
-Start-Process $gitinstaller_local '/silent /dir="C:\Git"' -Wait
-$env:Path += ";C:\Git\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-Write-Host "Removing git installation files...`n"
-Remove-Item -Recurse -Force $basedir
-
-if (-not (Test-Path $basedir))
-{
-    Write-Host "Downloading FrameworkBenchmarks from git...`n"
-    &$git "clone" $repo $basedir | Out-Host
-}
-
-
-Write-Host "`nLaunching installer...`n"
-Set-ExecutionPolicy -ExecutionPolicy Bypass -ErrorAction 'SilentlyContinue'
-powershell -NoExit -File $installer

+ 0 - 352
toolset/setup/windows/installer.ps1

@@ -1,352 +0,0 @@
-#
-# Versions of software (will need to be updated from time to time)
-#
-
-$node_installer_file      = "node-v0.10.13-x64.msi"
-$node_installer_path      = "v0.10.13/x64/$node_installer_file"
-$python_installer_file    = "python-2.7.5.amd64.msi"
-$python_installer_path    = "2.7.5/$python_installer_file"
-$python_version           = "27"
-$wincache_installer_file  = "wincache-1.3.4-5.4-nts-vc9-x86.exe"
-$wincache_installer_path  = "wincache-1.3.4/$wincache_installer_file"
-$go_installer_file        = "go1.2.windows-amd64.msi"
-$jre_installer_file       = "jdk-7u65-windows-x64.exe"
-$jdk_installer_file       = "jdk-7u65-windows-x64.exe"
-$jdk_master_hash          = "f0270817998c7408b24a2dd9ac420346" 
-# http://www.oracle.com/technetwork/java/javase/downloads/java-se-binaries-checksum-1956892.html
-$resin_version            = "resin-4.0.41"
-$resin_installer_file     = "$resin_version.zip"
-$ant_version              = "apache-ant-1.9.2"
-$ant_installer_file       = "$ant_version-bin.zip"
-$maven_version            = "apache-maven-3.0.5"
-$maven_installer_file     = "$maven_version-bin.zip"
-$maven_installer_path     = "maven-3/3.0.5/binaries/$maven_installer_file"
-$scala_version            = "2.10.2"
-$play_version             = "2.2.0"
-$play_installer_file      = "play-$play_version.zip"
-$sbt_version              = "0.13.5"
-$mercurial_installer_file = "mercurial-2.6.1-x64.msi"
-$cygwin_installer_file    = "setup-x86_64.exe"
-
-
-$basedir = "C:\FrameworkBenchmarks"
-$workdir = "$basedir\installs"
-New-Item -Path $workdir -Type directory -Force | Out-Null
-
-function GetMd5FileHash($fileName) {
-    [Reflection.Assembly]::LoadWithPartialName("System.Security") | out-null
-    $md5 = [System.Security.Cryptography.MD5]::Create()
-
-    $file = [System.IO.File]::OpenRead($fileName)
-    $hash = $md5.ComputeHash($file)
-    $file.Dispose()
-
-    $sb = New-Object System.Text.StringBuilder
-    $hash | % { [Void]$sb.Append($_.ToString("x2")) }
-    $sb.ToString()
-}
-
-#
-# Chocolatey package manager
-#
-Write-Host "Installing Chocolatey package manager"
-Invoke-Expression ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
-
-#
-# ASP.NET
-#
-Write-Host "Installing IIS, .NET and ASP.NET..."
-
-# Enable Windows Update to get rid of the yellow warnings
-# But this is not strictly neccessary
-$Updates = (New-Object -ComObject "Microsoft.Update.AutoUpdate").Settings
-$Updates.NotificationLevel = 2 # Notify before download
-$Updates.Save()
-$Updates.Refresh()
-
-Install-WindowsFeature Web-Server
-Install-WindowsFeature Web-Mgmt-Console
-Install-WindowsFeature NET-Framework-45-ASPNET
-Install-WindowsFeature Web-Asp-Net45
-
-$env:Path += ";C:\Windows\system32\inetsrv"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-# Optimize performance
-appcmd set config -section:httpProtocol /allowKeepAlive:true | Out-Null
-appcmd set config -section:httpLogging /dontLog:True | Out-Null
-# Enable detailed error pages
-#appcmd set config -section:system.webServer/httpErrors -errorMode:Detailed | Out-Null
-# Increase queue length for DefaultAppPool to avoid HTTP 503 errors coming from HTTP.SYS
-appcmd set apppool DefaultAppPool /queueLength:65535
-# Increase appConcurrentRequestLimit to avoid HTTP 503.2 errors from IIS http://support.microsoft.com/kb/943891
-appcmd set config -section:system.webServer/serverRuntime /appConcurrentRequestLimit:65535
-
-# URL Rewrite
-$rewrite_url = "http://download.microsoft.com/download/6/7/D/67D80164-7DD0-48AF-86E3-DE7A182D6815/rewrite_2.0_rtw_x64.msi"
-$rewrite_local = "$workdir\rewrite_2.0_rtw_x64.msi"
-(New-Object System.Net.WebClient).DownloadFile($rewrite_url, $rewrite_local)
-Start-Process "msiexec" "/i $rewrite_local /passive" -Wait
-
-#
-# Tools for building .NET projects on the server
-#
-Write-Host "`nInstalling .NET build tools...`n"
-
-# .NET Framework 4.5 SDK
-$sdktools_url = "http://download.microsoft.com/download/F/1/3/F1300C9C-A120-4341-90DF-8A52509B23AC/standalonesdk/sdksetup.exe"
-$sdktools_local = "$workdir\sdksetup.exe"
-(New-Object System.Net.WebClient).DownloadFile($sdktools_url, $sdktools_local)
-Start-Process "$workdir\sdksetup.exe" "/features OptionId.NetFxSoftwareDevelopmentKit /q /layout $workdir\sdksetup" -Wait
-Start-Process "msiexec" "/i $workdir\sdksetup\Redistributable\4.5.50710\sdk_tools4.msi VSEXTUI=1" -Wait
-
-# Web Deploy 3.0
-$webdeploy_url = "http://download.microsoft.com/download/1/B/3/1B3F8377-CFE1-4B40-8402-AE1FC6A0A8C3/WebDeploy_amd64_en-US.msi"
-$webdeploy_local = "$workdir\WebDeploy_amd64_en-US.msi"
-(New-Object System.Net.WebClient).DownloadFile($webdeploy_url, $webdeploy_local)
-Start-Process "msiexec" "/i $webdeploy_local /passive" -Wait
-
-#
-# node.js
-#
-Write-Host "Installing node.js...`n"
-$node_installer_url = "http://nodejs.org/dist/$node_installer_path"
-$node_installer_local = "$workdir\$node_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($node_installer_url, $node_installer_local)
-
-Start-Process $node_installer_local '/passive' -Wait
-$env:Path += ";C:\Program Files\nodejs"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Python
-#
-Write-Host "Installing Python...`n"
-$python_installer_url = "http://www.python.org/ftp/python/$python_installer_path"
-$python_installer_local = "$workdir\$python_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($python_installer_url, $python_installer_local)
-
-Start-Process $python_installer_local '/passive' -Wait
-$env:Path += ";C:\Python$python_version"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# PHP
-#
-Write-Host "Installing PHP..."
-
-# Locate current PHP 5.4 release
-Write-Host "Looking for current PHP 5.4 release"
-$php_download_page_url = 'http://windows.php.net/download/'
-$php_download_page_file = [IO.Path]::GetTempFileName()
-Write-Host "Downloading from $php_download_page_url into $php_download_page_file"
-Try {
-    (New-Object System.Net.WebClient).DownloadFile($php_download_page_url, $php_download_page_file)
-} Catch {
-    Write-Host "ERROR: Could not download from $php_download_page_url."
-    Write-Host $_.Exception.Message
-    Exit 1
-}
-$file = (cat $php_download_page_file) -join ""
-if ($file -match '(?s)h4 id="php-5.4-nts-VC9-x86".*?href="/downloads/releases/(.*?)">Zip</a>') {
-    $php_installer_file = $matches[1]
-    $php_installer_url = "http://windows.php.net/downloads/releases/$php_installer_file"
-    Write-Host "Current PHP 5.4 release found at $php_installer_url"
-}
-else {
-    Write-Host "ERROR: Current PHP release was not found. Aborting."
-    Exit 1
-}
-
-# Download PHP
-$php_installer_local = "$workdir\$php_installer_file"
-Try {
-    (New-Object System.Net.WebClient).DownloadFile($php_installer_url, $php_installer_local)
-} Catch {
-    Write-Host "ERROR: Could not download from $php_installer_url. "
-    Write-Host $_.Exception.Message
-    Exit 1
-}
-
-# Install PHP
-$php = "C:\PHP"
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($php_installer_local, $php) | Out-Null
-$env:Path += ";" + $php; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# php.ini
-$phpini = "$php\php.ini"
-Copy-Item "$php\php.ini-production" $phpini
-(Get-Content $phpini) -Replace ";date.timezone =", "date.timezone = UTC" | Set-Content $phpini
-(Get-Content $phpini) -Replace "short_open_tag = Off", "short_open_tag = On" | Set-Content $phpini
-(Get-Content $phpini) -Replace "display_errors = Off", "display_errors = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace "log_errors = On", "log_errors = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace "output_buffering = 4096", "output_buffering = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";cgi.force_redirect = 1", "cgi.force_redirect = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";fastcgi.impersonate = 1", "fastcgi.impersonate = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";fastcgi.logging = 0", "fastcgi.logging = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace '; extension_dir = "./"', "extension_dir = `"$php\ext`"" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";extension=", "extension=" | Set-Content $phpini
-(Get-Content $phpini) -Replace "extension=php_(interbase|oci8|oci8_11g|firebird|oci|pspell|sybase_ct|zip|pdo_firebird|pdo_oci|snmp).dll.*", "" | Set-Content $phpini
-
-# IIS with PHP via FastCGI
-Install-WindowsFeature Web-CGI | Out-Null
-appcmd set config -section:system.webServer/fastCgi /+"[fullPath='C:\PHP\php-cgi.exe', arguments='', maxInstances='0', instanceMaxRequests='10000', queueLength='1000', rapidFailsPerMinute='10', idleTimeout='300', activityTimeout='30', requestTimeout='90', protocol='NamedPipe', flushNamedPipe='False']" /commit:apphost | Out-Null
-appcmd set config -section:system.webServer/fastCgi /+"[fullPath='C:\PHP\php-cgi.exe'].environmentVariables.[name='PHPRC', value='C:\PHP\php.ini']" /commit:apphost | Out-Null
-appcmd set config -section:system.webServer/handlers /+"[name='PHP FastCGI', path='*.php', modules='FastCgiModule', verb='*', scriptProcessor='C:\PHP\php-cgi.exe', resourceType='File', requireAccess='Script']" /commit:apphost | Out-Null
-
-# phpinfo() test file
-Set-Content "c:\inetpub\wwwroot\phpinfo.php" "<?php phpinfo(); ?>"
-
-# wincache
-$wincache_url = "http://heanet.dl.sourceforge.net/project/wincache/$wincache_installer_path"
-$wincache_local = "$workdir\$wincache_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($wincache_url, $wincache_local)
-Start-Process $wincache_local "/q /T:$php\ext" -Wait
-Move-Item "$php\ext\wincache*" "c:\inetpub\wwwroot"
-Set-ItemProperty "c:\inetpub\wwwroot\wincache.php" -name IsReadOnly -value $false
-(Get-Content "c:\inetpub\wwwroot\wincache.php") -Replace "'USE_AUTHENTICATION', 1", "'USE_AUTHENTICATION', 0" | Set-Content "c:\inetpub\wwwroot\wincache.php"
-Add-Content $phpini "`n`n[PHP]`n"
-Add-Content $phpini "extension=php_wincache.dll"
-
-# composer
-$composer_url = "https://getcomposer.org/Composer-Setup.exe"
-$composer_local = "$workdir\Composer-Setup.exe"
-(New-Object System.Net.WebClient).DownloadFile($composer_url, $composer_local)
-Start-Process $composer_local "/silent" -Wait
-$env:Path += ";C:\ProgramData\Composer\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-Write-Host ""
-
-#
-# Go
-#
-Write-Host "Installing Go...`n"
-$go_url = "http://go.googlecode.com/files/$go_installer_file"
-$go_local = "$workdir\$go_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($go_url, $go_local)
-Start-Process $go_local "/passive" -Wait
-$env:Path += ";C:\Go\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Java
-#
-Write-Host "Installing Java...`n"
-
-# jre
-#Write-Host "Installing JRE...`n"
-#$jre_url = "http://img.cs.montana.edu/windows/$jre_installer_file"
-#$jre_local = "$workdir\$jre_installer_file"
-#$jre_dir = "C:\Java\jre"
-#(New-Object System.Net.WebClient).DownloadFile($jre_url, $jre_local)
-#Start-Process $jre_local "/s INSTALLDIR=$jre_dir" -Wait
-#$env:Path += ";$jre_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-#$env:JAVA_HOME = $jre_dir; [Environment]::SetEnvironmentVariable("JAVA_HOME", $jre_dir, [System.EnvironmentVariableTarget]::Machine)
-
-# jdk
-Write-Host "Installing JDK...`n"
-$jdk_url = "http://ghaffarian.net/downloads/Java/JDK/$jdk_installer_file"
-$jdk_local = "$workdir\$jdk_installer_file"
-$jdk_dir = "C:\Java\jdk"
-(New-Object System.Net.WebClient).DownloadFile($jdk_url, $jdk_local)
-
-$jdk_local_hash = GetMd5FileHash($jdk_local)
-if ($jdk_master_hash -ne $jdk_local_hash)
-{
-    Write-Host $jdk_master_hash
-    Write-Host $jdk_local_hash
-    Write-Host "JDK file checksum mismatch. Aborting!"
-    Exit 1
-}
-
-Start-Process $jdk_local "/s INSTALLDIR=$jdk_dir" -Wait
-$env:Path += ";$jdk_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-$env:JAVA_HOME = $jdk_dir; [Environment]::SetEnvironmentVariable("JAVA_HOME", $jdk_dir, [System.EnvironmentVariableTarget]::Machine)
-
-# resin
-Write-Host "Installing Resin...`n"
-$resin_url = "http://www.caucho.com/download/$resin_installer_file"
-$resin_local = "$workdir\$resin_installer_file"
-$resin_dir = "C:\Java\resin"
-(New-Object System.Net.WebClient).DownloadFile($resin_url, $resin_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($resin_local, $workdir) | Out-Null
-Move-Item "$workdir\$resin_version" $resin_dir
-Copy-Item "$basedir\config\resin.properties" "$resin_dir\conf\resin.properties"
-[Environment]::SetEnvironmentVariable("RESIN_HOME", $resin_dir, [System.EnvironmentVariableTarget]::Machine)
-#$env:Path += ";$resin_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# ant
-#Write-Host "Installing Ant...`n"
-#$ant_url = "http://apache.mirrors.hoobly.com//ant/binaries/$ant_installer_file"
-#$ant_local = "$workdir\$ant_installer_file"
-#$ant_dir = "C:\Java\ant"
-#(New-Object System.Net.WebClient).DownloadFile($ant_url, $ant_local)
-#[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-#[System.IO.Compression.ZipFile]::ExtractToDirectory($ant_local, $workdir) | Out-Null
-#Move-Item "$workdir\$ant_version" $ant_dir
-#$env:Path += ";$ant_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# maven
-Write-Host "Installing Maven...`n"
-$maven_url = "http://mirror.cc.columbia.edu/pub/software/apache/maven/$maven_installer_path"
-$maven_local = "$workdir\$maven_installer_file"
-$maven_dir = "C:\Java\maven"
-(New-Object System.Net.WebClient).DownloadFile($maven_url, $maven_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($maven_local, $workdir) | Out-Null
-Move-Item "$workdir\$maven_version" $maven_dir
-$env:Path += ";$maven_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# scala
-cinst scala -version $scala_version
-
-# play
-$play_url = "http://downloads.typesafe.com/play/$play_version/$play_installer_file"
-$play_local = "$workdir\$play_installer_file"
-$play_dir = "C:\Java\play"
-(New-Object System.Net.WebClient).DownloadFile($play_url, $play_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($play_local, $workdir) | Out-Null
-Move-Item "$workdir\play-$play_version" $play_dir
-$env:Path += ";$play_dir"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# sbt
-$sbt_installer_file = "sbt-$sbt_version.zip"
-$sbt_url = "http://dl.bintray.com/sbt/native-packages/sbt/$sbt_version/$sbt_installer_file"
-$sbt_local = "$workdir\$sbt_installer_file"
-$sbt_dir = "C:\Java\sbt"
-(New-Object System.Net.WebClient).DownloadFile($sbt_url, $sbt_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($sbt_local, $workdir) | Out-Null
-Move-Item "$workdir\sbt" $sbt_dir
-$env:Path += ";$sbt_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Firewall
-#
-Write-Host "Configuring firewall...`n"
-New-NetFirewallRule -DisplayName "HTTP 8080" -Action Allow -Direction Inbound -LocalPort 8080 -Protocol TCP | Out-Null
-
-#
-# Mercurial
-#
-Write-Host "Installing Mercurial...`n"
-$hg_installer_url = "https://bitbucket.org/tortoisehg/files/downloads/$mercurial_installer_file"
-$hg_installer_local = "$workdir\$mercurial_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($hg_installer_url, $hg_installer_local)
-
-Start-Process $hg_installer_local '/passive' -Wait
-$env:Path += ";C:\Program Files\Mercurial"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Cygwin (including sftp)
-#
-Write-Host "Installing Cygwin...`n"
-$cygwin_installer_url = "http://cygwin.com/$cygwin_installer_file"
-$cygwin_installer_dir = $workdir + "\cygwin-installer"
-New-Item -Path $cygwin_installer_dir -Type directory -Force | Out-Null
-$cygwin_installer_local = "$cygwin_installer_dir\$cygwin_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($cygwin_installer_url, $cygwin_installer_local)
-
-$cygwin_install_dir = "C:\Cygwin"
-Start-Process $cygwin_installer_local "-q -n -l $cygwin_installer_dir -s http://mirrors.kernel.org/sourceware/cygwin/ -R $cygwin_install_dir -P openssh" -WorkingDirectory "$cygwin_installer_dir" -Wait -RedirectStandardOutput $cygwin_installer_dir\install.log
-$env:Path += ";$cygwin_install_dir;$cygwin_install_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-cd $basedir

+ 0 - 340
toolset/setup/windows/installer.ps1~

@@ -1,340 +0,0 @@
-#
-# Versions of software (will need to be updated from time to time)
-#
-
-$node_installer_file      = "node-v0.10.13-x64.msi"
-$node_installer_path      = "v0.10.13/x64/$node_installer_file"
-$python_installer_file    = "python-2.7.5.amd64.msi"
-$python_installer_path    = "2.7.5/$python_installer_file"
-$python_version           = "27"
-$wincache_installer_file  = "wincache-1.3.4-5.4-nts-vc9-x86.exe"
-$wincache_installer_path  = "wincache-1.3.4/$wincache_installer_file"
-$go_installer_file        = "go1.2.windows-amd64.msi"
-$jre_installer_file       = "jre-7u25-windows-x64.exe"
-$jdk_installer_file       = "jdk-7u45-windows-x64.exe"
-$jdk_master_hash          = "943527ed9111cbb746d4ab2bb2c31cd6" 
-# https://www.oracle.com/technetwork/java/javase/downloads/java-se-binaries-checksum-1956892.html
-$resin_version            = "resin-4.0.36"
-$resin_installer_file     = "$resin_version.zip"
-$ant_version              = "apache-ant-1.9.2"
-$ant_installer_file       = "$ant_version-bin.zip"
-$maven_version            = "apache-maven-3.0.5"
-$maven_installer_file     = "$maven_version-bin.zip"
-$maven_installer_path     = "maven-3/3.0.5/binaries/$maven_installer_file"
-$scala_version            = "2.10.2"
-$play_version             = "2.2.0"
-$play_installer_file      = "play-$play_version.zip"
-$mercurial_installer_file = "mercurial-2.6.1-x64.msi"
-$cygwin_installer_file    = "setup-x86_64.exe"
-
-
-$basedir = "C:\FrameworkBenchmarks"
-$workdir = "$basedir\installs"
-New-Item -Path $workdir -Type directory -Force | Out-Null
-
-function GetMd5FileHash($fileName) {
-    [Reflection.Assembly]::LoadWithPartialName("System.Security") | out-null
-    $md5 = [System.Security.Cryptography.MD5]::Create()
-
-    $file = [System.IO.File]::OpenRead($fileName)
-    $hash = $md5.ComputeHash($file)
-    $file.Dispose()
-
-    $sb = New-Object System.Text.StringBuilder
-    $hash | % { [Void]$sb.Append($_.ToString("x2")) }
-    $sb.ToString()
-}
-
-#
-# Chocolatey package manager
-#
-Write-Host "Installing Chocolatey package manager"
-Invoke-Expression ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))
-
-#
-# ASP.NET
-#
-Write-Host "Installing IIS, .NET and ASP.NET..."
-
-# Enable Windows Update to get rid of the yellow warnings
-# But this is not strictly neccessary
-$Updates = (New-Object -ComObject "Microsoft.Update.AutoUpdate").Settings
-$Updates.NotificationLevel = 2 # Notify before download
-$Updates.Save()
-$Updates.Refresh()
-
-Install-WindowsFeature Web-Server
-Install-WindowsFeature Web-Mgmt-Console
-Install-WindowsFeature NET-Framework-45-ASPNET
-Install-WindowsFeature Web-Asp-Net45
-
-$env:Path += ";C:\Windows\system32\inetsrv"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-# Optimize performance
-appcmd set config -section:httpProtocol /allowKeepAlive:true | Out-Null
-appcmd set config -section:httpLogging /dontLog:True | Out-Null
-# Enable detailed error pages
-#appcmd set config -section:system.webServer/httpErrors -errorMode:Detailed | Out-Null
-# Increase queue length for DefaultAppPool to avoid HTTP 503 errors coming from HTTP.SYS
-appcmd set apppool DefaultAppPool /queueLength:65535
-# Increase appConcurrentRequestLimit to avoid HTTP 503.2 errors from IIS http://support.microsoft.com/kb/943891
-appcmd set config -section:system.webServer/serverRuntime /appConcurrentRequestLimit:65535
-
-# URL Rewrite
-$rewrite_url = "http://download.microsoft.com/download/6/7/D/67D80164-7DD0-48AF-86E3-DE7A182D6815/rewrite_2.0_rtw_x64.msi"
-$rewrite_local = "$workdir\rewrite_2.0_rtw_x64.msi"
-(New-Object System.Net.WebClient).DownloadFile($rewrite_url, $rewrite_local)
-Start-Process "msiexec" "/i $rewrite_local /passive" -Wait
-
-#
-# Tools for building .NET projects on the server
-#
-Write-Host "`nInstalling .NET build tools...`n"
-
-# .NET Framework 4.5 SDK
-$sdktools_url = "http://download.microsoft.com/download/F/1/3/F1300C9C-A120-4341-90DF-8A52509B23AC/standalonesdk/sdksetup.exe"
-$sdktools_local = "$workdir\sdksetup.exe"
-(New-Object System.Net.WebClient).DownloadFile($sdktools_url, $sdktools_local)
-Start-Process "$workdir\sdksetup.exe" "/features OptionId.NetFxSoftwareDevelopmentKit /q /layout $workdir\sdksetup" -Wait
-Start-Process "msiexec" "/i $workdir\sdksetup\Redistributable\4.5.50710\sdk_tools4.msi VSEXTUI=1" -Wait
-
-# Web Deploy 3.0
-$webdeploy_url = "http://download.microsoft.com/download/1/B/3/1B3F8377-CFE1-4B40-8402-AE1FC6A0A8C3/WebDeploy_amd64_en-US.msi"
-$webdeploy_local = "$workdir\WebDeploy_amd64_en-US.msi"
-(New-Object System.Net.WebClient).DownloadFile($webdeploy_url, $webdeploy_local)
-Start-Process "msiexec" "/i $webdeploy_local /passive" -Wait
-
-#
-# node.js
-#
-Write-Host "Installing node.js...`n"
-$node_installer_url = "http://nodejs.org/dist/$node_installer_path"
-$node_installer_local = "$workdir\$node_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($node_installer_url, $node_installer_local)
-
-Start-Process $node_installer_local '/passive' -Wait
-$env:Path += ";C:\Program Files\nodejs"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Python
-#
-Write-Host "Installing Python...`n"
-$python_installer_url = "http://www.python.org/ftp/python/$python_installer_path"
-$python_installer_local = "$workdir\$python_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($python_installer_url, $python_installer_local)
-
-Start-Process $python_installer_local '/passive' -Wait
-$env:Path += ";C:\Python$python_version"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# PHP
-#
-Write-Host "Installing PHP..."
-
-# Locate current PHP 5.4 release
-Write-Host "Looking for current PHP 5.4 release"
-$php_download_page_url = 'http://windows.php.net/download/'
-$php_download_page_file = [IO.Path]::GetTempFileName()
-Write-Host "Downloading from $php_download_page_url into $php_download_page_file"
-Try {
-    (New-Object System.Net.WebClient).DownloadFile($php_download_page_url, $php_download_page_file)
-} Catch {
-    Write-Host "ERROR: Could not download from $php_download_page_url."
-    Write-Host $_.Exception.Message
-    Exit 1
-}
-$file = (cat $php_download_page_file) -join ""
-if ($file -match '(?s)h4 id="php-5.4-nts-VC9-x86".*?href="/downloads/releases/(.*?)">Zip</a>') {
-    $php_installer_file = $matches[1]
-    $php_installer_url = "http://windows.php.net/downloads/releases/$php_installer_file"
-    Write-Host "Current PHP 5.4 release found at $php_installer_url"
-}
-else {
-    Write-Host "ERROR: Current PHP release was not found. Aborting."
-    Exit 1
-}
-
-# Download PHP
-$php_installer_local = "$workdir\$php_installer_file"
-Try {
-    (New-Object System.Net.WebClient).DownloadFile($php_installer_url, $php_installer_local)
-} Catch {
-    Write-Host "ERROR: Could not download from $php_installer_url. "
-    Write-Host $_.Exception.Message
-    Exit 1
-}
-
-# Install PHP
-$php = "C:\PHP"
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($php_installer_local, $php) | Out-Null
-$env:Path += ";" + $php; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# php.ini
-$phpini = "$php\php.ini"
-Copy-Item "$php\php.ini-production" $phpini
-(Get-Content $phpini) -Replace ";date.timezone =", "date.timezone = UTC" | Set-Content $phpini
-(Get-Content $phpini) -Replace "short_open_tag = Off", "short_open_tag = On" | Set-Content $phpini
-(Get-Content $phpini) -Replace "display_errors = Off", "display_errors = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace "log_errors = On", "log_errors = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace "output_buffering = 4096", "output_buffering = Off" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";cgi.force_redirect = 1", "cgi.force_redirect = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";fastcgi.impersonate = 1", "fastcgi.impersonate = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";fastcgi.logging = 0", "fastcgi.logging = 0" | Set-Content $phpini
-(Get-Content $phpini) -Replace '; extension_dir = "./"', "extension_dir = `"$php\ext`"" | Set-Content $phpini
-(Get-Content $phpini) -Replace ";extension=", "extension=" | Set-Content $phpini
-(Get-Content $phpini) -Replace "extension=php_(interbase|oci8|oci8_11g|firebird|oci|pspell|sybase_ct|zip|pdo_firebird|pdo_oci|snmp).dll.*", "" | Set-Content $phpini
-
-# IIS with PHP via FastCGI
-Install-WindowsFeature Web-CGI | Out-Null
-appcmd set config -section:system.webServer/fastCgi /+"[fullPath='C:\PHP\php-cgi.exe', arguments='', maxInstances='0', instanceMaxRequests='10000', queueLength='1000', rapidFailsPerMinute='10', idleTimeout='300', activityTimeout='30', requestTimeout='90', protocol='NamedPipe', flushNamedPipe='False']" /commit:apphost | Out-Null
-appcmd set config -section:system.webServer/fastCgi /+"[fullPath='C:\PHP\php-cgi.exe'].environmentVariables.[name='PHPRC', value='C:\PHP\php.ini']" /commit:apphost | Out-Null
-appcmd set config -section:system.webServer/handlers /+"[name='PHP FastCGI', path='*.php', modules='FastCgiModule', verb='*', scriptProcessor='C:\PHP\php-cgi.exe', resourceType='File', requireAccess='Script']" /commit:apphost | Out-Null
-
-# phpinfo() test file
-Set-Content "c:\inetpub\wwwroot\phpinfo.php" "<?php phpinfo(); ?>"
-
-# wincache
-$wincache_url = "http://heanet.dl.sourceforge.net/project/wincache/$wincache_installer_path"
-$wincache_local = "$workdir\$wincache_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($wincache_url, $wincache_local)
-Start-Process $wincache_local "/q /T:$php\ext" -Wait
-Move-Item "$php\ext\wincache*" "c:\inetpub\wwwroot"
-Set-ItemProperty "c:\inetpub\wwwroot\wincache.php" -name IsReadOnly -value $false
-(Get-Content "c:\inetpub\wwwroot\wincache.php") -Replace "'USE_AUTHENTICATION', 1", "'USE_AUTHENTICATION', 0" | Set-Content "c:\inetpub\wwwroot\wincache.php"
-Add-Content $phpini "`n`n[PHP]`n"
-Add-Content $phpini "extension=php_wincache.dll"
-
-# composer
-$composer_url = "https://getcomposer.org/Composer-Setup.exe"
-$composer_local = "$workdir\Composer-Setup.exe"
-(New-Object System.Net.WebClient).DownloadFile($composer_url, $composer_local)
-Start-Process $composer_local "/silent" -Wait
-$env:Path += ";C:\ProgramData\Composer\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-Write-Host ""
-
-#
-# Go
-#
-Write-Host "Installing Go...`n"
-$go_url = "http://go.googlecode.com/files/$go_installer_file"
-$go_local = "$workdir\$go_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($go_url, $go_local)
-Start-Process $go_local "/passive" -Wait
-$env:Path += ";C:\Go\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Java
-#
-Write-Host "Installing Java...`n"
-
-# jre
-#Write-Host "Installing JRE...`n"
-#$jre_url = "http://img.cs.montana.edu/windows/$jre_installer_file"
-#$jre_local = "$workdir\$jre_installer_file"
-#$jre_dir = "C:\Java\jre"
-#(New-Object System.Net.WebClient).DownloadFile($jre_url, $jre_local)
-#Start-Process $jre_local "/s INSTALLDIR=$jre_dir" -Wait
-#$env:Path += ";$jre_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-#$env:JAVA_HOME = $jre_dir; [Environment]::SetEnvironmentVariable("JAVA_HOME", $jre_dir, [System.EnvironmentVariableTarget]::Machine)
-
-# jdk
-Write-Host "Installing JDK...`n"
-$jdk_url = "http://ghaffarian.net/downloads/Java/JDK/$jdk_installer_file"
-$jdk_local = "$workdir\$jdk_installer_file"
-$jdk_dir = "C:\Java\jdk"
-(New-Object System.Net.WebClient).DownloadFile($jdk_url, $jdk_local)
-
-$jdk_local_hash = GetMd5FileHash($jdk_local)
-if ($jdk_master_hash -ne $jdk_local_hash)
-{
-    Write-Host $jdk_master_hash
-    Write-Host $jdk_local_hash
-    Write-Host "JDK file checksum mismatch. Aborting!"
-    Exit 1
-}
-
-Start-Process $jdk_local "/s INSTALLDIR=$jdk_dir" -Wait
-$env:Path += ";$jdk_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-$env:JAVA_HOME = $jdk_dir; [Environment]::SetEnvironmentVariable("JAVA_HOME", $jdk_dir, [System.EnvironmentVariableTarget]::Machine)
-
-# resin
-Write-Host "Installing Resin...`n"
-$resin_url = "http://www.caucho.com/download/$resin_installer_file"
-$resin_local = "$workdir\$resin_installer_file"
-$resin_dir = "C:\Java\resin"
-(New-Object System.Net.WebClient).DownloadFile($resin_url, $resin_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($resin_local, $workdir) | Out-Null
-Move-Item "$workdir\$resin_version" $resin_dir
-Copy-Item "$basedir\config\resin.properties" "$resin_dir\conf\resin.properties"
-[Environment]::SetEnvironmentVariable("RESIN_HOME", $resin_dir, [System.EnvironmentVariableTarget]::Machine)
-#$env:Path += ";$resin_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# ant
-#Write-Host "Installing Ant...`n"
-#$ant_url = "http://apache.mirrors.hoobly.com//ant/binaries/$ant_installer_file"
-#$ant_local = "$workdir\$ant_installer_file"
-#$ant_dir = "C:\Java\ant"
-#(New-Object System.Net.WebClient).DownloadFile($ant_url, $ant_local)
-#[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-#[System.IO.Compression.ZipFile]::ExtractToDirectory($ant_local, $workdir) | Out-Null
-#Move-Item "$workdir\$ant_version" $ant_dir
-#$env:Path += ";$ant_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# maven
-Write-Host "Installing Maven...`n"
-$maven_url = "http://mirror.cc.columbia.edu/pub/software/apache/maven/$maven_installer_path"
-$maven_local = "$workdir\$maven_installer_file"
-$maven_dir = "C:\Java\maven"
-(New-Object System.Net.WebClient).DownloadFile($maven_url, $maven_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($maven_local, $workdir) | Out-Null
-Move-Item "$workdir\$maven_version" $maven_dir
-$env:Path += ";$maven_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-# scala
-cinst scala -version $scala_version
-
-# play
-$play_url = "http://downloads.typesafe.com/play/$play_version/$play_installer_file"
-$play_local = "$workdir\$play_installer_file"
-$play_dir = "C:\Java\play"
-(New-Object System.Net.WebClient).DownloadFile($play_url, $play_local)
-[System.Reflection.Assembly]::LoadWithPartialName("System.IO.Compression.FileSystem") | Out-Null
-[System.IO.Compression.ZipFile]::ExtractToDirectory($play_local, $workdir) | Out-Null
-Move-Item "$workdir\play-$play_version" $play_dir
-$env:Path += ";$play_dir"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Firewall
-#
-Write-Host "Configuring firewall...`n"
-New-NetFirewallRule -DisplayName "HTTP 8080" -Action Allow -Direction Inbound -LocalPort 8080 -Protocol TCP | Out-Null
-
-#
-# Mercurial
-#
-Write-Host "Installing Mercurial...`n"
-$hg_installer_url = "https://bitbucket.org/tortoisehg/files/downloads/$mercurial_installer_file"
-$hg_installer_local = "$workdir\$mercurial_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($hg_installer_url, $hg_installer_local)
-
-Start-Process $hg_installer_local '/passive' -Wait
-$env:Path += ";C:\Program Files\Mercurial"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-#
-# Cygwin (including sftp)
-#
-Write-Host "Installing Cygwin...`n"
-$cygwin_installer_url = "http://cygwin.com/$cygwin_installer_file"
-$cygwin_installer_dir = $workdir + "\cygwin-installer"
-New-Item -Path $cygwin_installer_dir -Type directory -Force | Out-Null
-$cygwin_installer_local = "$cygwin_installer_dir\$cygwin_installer_file"
-(New-Object System.Net.WebClient).DownloadFile($cygwin_installer_url, $cygwin_installer_local)
-
-$cygwin_install_dir = "C:\Cygwin"
-Start-Process $cygwin_installer_local "-q -n -l $cygwin_installer_dir -s http://mirrors.kernel.org/sourceware/cygwin/ -R $cygwin_install_dir -P openssh" -WorkingDirectory "$cygwin_installer_dir" -Wait -RedirectStandardOutput $cygwin_installer_dir\install.log
-$env:Path += ";$cygwin_install_dir;$cygwin_install_dir\bin"; [Environment]::SetEnvironmentVariable("Path", $env:Path, [System.EnvironmentVariableTarget]::Machine)
-
-cd $basedir

+ 35 - 13
toolset/travis/travis_diff.py

@@ -16,12 +16,16 @@ from sets import Set

 # Returns a unique list of fw_depends changes
 def get_fw_depends_changes(changes_output):
-    return list(Set(re.findall(r"toolset/setup/linux/.+/(.+)\.sh", changes_output, re.M)))
+    return list(
+        Set(
+            re.findall(r"toolset/setup/linux/.+/(.+)\.sh", changes_output,
+                       re.M)))


 # Returns a unique list of frameworks that have been changed
 def fw_found_in_changes(changes_output):
-    return re.search(r"" + re.escape(os.environ['TESTDIR']), changes_output, re.M)
+    return re.search(r"" + re.escape(os.environ['TESTDIR']), changes_output,
+                     re.M)


 # Cleans up diffing and grep output and into an array of strings
@@ -34,16 +38,19 @@ def quit_diffing(should_test_run):
         print("travis-diff-continue")
     exit(0)

+
 # TODO: Remove this
 quit_diffing(True)

 # COMMIT MESSAGES:
 # Before any complicated diffing, check for forced runs from the commit message
-last_commit_msg = subprocess.check_output(['bash', '-c', 'git log -1 --pretty=%B'])
+last_commit_msg = subprocess.check_output(
+    ['bash', '-c', 'git log -1 --pretty=%B'])

 # Forced *fw-only* specific tests
 if re.search(r'\[ci fw-only.+\]', last_commit_msg, re.M):
-    if re.search(r'\[ci fw-only(.?)+ ' + re.escape(os.environ['TESTDIR']) + '( .+\]|])', last_commit_msg, re.M):
+    if re.search(r'\[ci fw-only(.?)+ ' + re.escape(os.environ['TESTDIR']) +
+                 '( .+\]|])', last_commit_msg, re.M):
         print("This test has been forced to run from the commit message.")
         quit_diffing(True)
     else:
@@ -54,7 +61,9 @@ if re.search(r'\[ci run-all\]', last_commit_msg, re.M):
     print("All tests have been forced to run from the commit message.")
     quit_diffing(True)
 # Forced framework run
-if re.search(r'\[ci fw(.?)+ ' + re.escape(os.environ['TESTDIR']) + '( .+\]|\])', last_commit_msg, re.M):
+if re.search(
+        r'\[ci fw(.?)+ ' + re.escape(os.environ['TESTDIR']) + '( .+\]|\])',
+        last_commit_msg, re.M):
     print('This test has been forced to run from the commit message.')
     quit_diffing(True)

@@ -67,7 +76,8 @@ commit_range = ""
 if is_PR:
     print('I am testing a pull request')
     first_commit = os.environ['TRAVIS_COMMIT_RANGE'].split('...')[0]
-    last_commit = subprocess.check_output("git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
+    last_commit = subprocess.check_output(
+        "git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
     print("Guessing that first commit in PR is : {!s}".format(first_commit))
     print("Guessing that final commit in PR is : {!s}".format(last_commit))

@@ -78,19 +88,27 @@ if is_PR:
         print("Only one commit in range, examining {!s}".format(last_commit))
         commit_range = "-m --first-parent -1 {!s}".format(last_commit)
     else:
-        commit_range = "--first-parent {!s}...{!s}".format(first_commit, last_commit)
+        commit_range = "--first-parent {!s}...{!s}".format(
+            first_commit, last_commit)

 if not is_PR:
     print('I am not testing a pull request')
-    commit_range = "--first-parent -m {!s}".format(os.environ['TRAVIS_COMMIT_RANGE'])
+    commit_range = "--first-parent -m {!s}".format(
+        os.environ['TRAVIS_COMMIT_RANGE'])

     # Handle 1
     if commit_range == "":
-        commit_range = "--first-parent -m -1 {!s}".format(os.environ['TRAVIS_COMMIT'])
+        commit_range = "--first-parent -m -1 {!s}".format(
+            os.environ['TRAVIS_COMMIT'])

 print("Using commit range `{!s}`".format(commit_range))
-print("Running `git log --name-only --pretty=\"format:\" {!s}`".format(commit_range))
-changes = clean_output(subprocess.check_output(['bash', '-c', 'git log --name-only --pretty="format:" {!s}'.format(commit_range)]))
+print("Running `git log --name-only --pretty=\"format:\" {!s}`".format(
+    commit_range))
+changes = clean_output(
+    subprocess.check_output([
+        'bash', '-c',
+        'git log --name-only --pretty="format:" {!s}'.format(commit_range)
+    ]))

 # Satisfies this requirement:
 #   Anything in the toolset/ that isn't in the setup/linux/*/ subdirectory
@@ -116,14 +134,18 @@ i = 0
 while i <= len(fw_depends_changes) - 1:

     # Generates output of files that contain the fw_depends for this dependency
-    more_changes = subprocess.check_output(['bash', '-c', 'grep -RP "fw_depends(.?)+ ' + re.escape(fw_depends_changes[i]) + '( |$)" . || echo ""'])
+    more_changes = subprocess.check_output([
+        'bash', '-c', 'grep -RP "fw_depends(.?)+ ' +
+        re.escape(fw_depends_changes[i]) + '( |$)" . || echo ""'
+    ])
     print("more_changes: {!s}".format(more_changes))
     if fw_found_in_changes(more_changes):
         print("Found changes that affect this framework. Running test.")
         quit_diffing(True)

     # Preserves the order of the list, so we can continue with this loop
-    fw_depends_changes.extend(Set(get_fw_depends_changes(more_changes)) - Set(fw_depends_changes))
+    fw_depends_changes.extend(
+        Set(get_fw_depends_changes(more_changes)) - Set(fw_depends_changes))
     i += 1

 # If we get here, there was nothing found
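
For reference, the commit-message triggers handled above take roughly these forms; the framework path is whatever TESTDIR is set to for the Travis job, and Java/gemini below is only an illustrative value:

    [ci run-all]               force every test to run
    [ci fw Java/gemini]        force the named framework's tests to run
    [ci fw-only Java/gemini]   run only the frameworks listed and skip the rest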

+ 0 - 4
toolset/travis/travis_setup.sh

@@ -18,10 +18,6 @@ sed -i s/techempower/travis/g ./benchmark.cfg

 echo "travis ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers

-echo 127.0.0.1 TFB-database | sudo tee --append /etc/hosts
-echo 127.0.0.1 TFB-client   | sudo tee --append /etc/hosts
-echo 127.0.0.1 TFB-server   | sudo tee --append /etc/hosts
-
 source ./toolset/setup/linux/prerequisites.sh

 tfb --init --quiet

+ 0 - 0
toolset/utils/__init__.py


+ 86 - 0
toolset/utils/benchmark_config.py

@@ -0,0 +1,86 @@
+from toolset.utils import setup_util
+from toolset.benchmark.test_types import *
+from toolset.utils.output_helper import QuietOutputStream
+
+import logging
+import time
+
+
+class BenchmarkConfig:
+    def __init__(self, args):
+        '''
+        Configures this BenchmarkConfig given the arguments provided.
+        '''
+
+        # Map type strings to their objects
+        types = dict()
+        types['json'] = JsonTestType(self)
+        types['db'] = DBTestType(self)
+        types['query'] = QueryTestType(self)
+        types['fortune'] = FortuneTestType(self)
+        types['update'] = UpdateTestType(self)
+        types['plaintext'] = PlaintextTestType(self)
+        types['cached_query'] = CachedQueryTestType(self)
+
+        # Turn type into a map instead of a string
+        if args['type'] == 'all':
+            args['types'] = types
+        else:
+            args['types'] = {args['type']: types[args['type']]}
+        del args['type']
+
+        args['max_concurrency'] = max(args['concurrency_levels'])
+        if 'pipeline_concurrency_levels' not in args:
+            args['pipeline_concurrency_levels'] = [256, 1024, 4096, 16384]
+
+        self.quiet = False
+        self.client_user = ""
+        self.client_host = ""
+        self.client_identity_file = ""
+        self.database_user = ""
+        self.database_host = ""
+        self.database_identity_file = ""
+        self.parse = False
+        self.new = False
+        self.init = False
+        self.build = False
+        self.clean = False
+        self.list_tests = False
+        self.concurrency_levels = []
+        self.pipeline_concurrency_levels = []
+
+        self.__dict__.update(args)
+
+        self.quiet_out = QuietOutputStream(self.quiet)
+
+        self.start_time = time.time()
+
+        # setup logging
+        logging.basicConfig(stream=self.quiet_out, level=logging.INFO)
+
+        # setup some additional variables
+        if self.database_user == None: self.database_user = self.client_user
+        if self.database_host == None: self.database_host = self.client_host
+        if self.database_identity_file == None:
+            self.database_identity_file = self.client_identity_file
+
+        # Remember root directory
+        self.fwroot = setup_util.get_fwroot()
+
+        # setup current_benchmark.txt location
+        self.current_benchmark = "/tmp/current_benchmark.txt"
+
+        if hasattr(self, 'parse') and self.parse != None:
+            self.timestamp = self.parse
+        else:
+            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
+
+        # Setup the ssh command string
+        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
+        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
+        if self.database_identity_file != None:
+            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
+        if self.client_identity_file != None:
+            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
+
+        self.run_test_timeout_seconds = 7200
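
For orientation, a minimal sketch of building the new config object by hand; the argument names below are only the subset this constructor reads directly (not the full run-tests.py argument set), and 'parse': None mirrors what argparse would normally supply.

    # Illustrative only: a hand-built args dict standing in for parsed CLI args.
    from toolset.utils.benchmark_config import BenchmarkConfig

    args = {
        'type': 'json',                     # replaced by a 'types' map of test-type objects
        'concurrency_levels': [16, 32, 64],
        'parse': None,                      # argparse supplies this; None => fresh timestamp
        'client_user': 'techempower',
        'client_host': 'TFB-client',
        'database_user': 'techempower',
        'database_host': 'TFB-database',
        'server_host': 'TFB-server',
        'quiet': True,
    }
    config = BenchmarkConfig(args)
    print(config.max_concurrency)       # 64
    print(sorted(config.types.keys()))  # ['json']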

+ 15 - 0
toolset/utils/cleaner.py

@@ -0,0 +1,15 @@
+import os
+import shutil
+
+
+def clean(results):
+    '''
+    Cleans the results directory of all files and folders
+    '''
+    results_dir = os.path.dirname(results.directory)
+    if os.path.exists(results_dir):
+        for item in os.listdir(results_dir):
+            if os.path.isdir(os.path.join(results_dir, item)):
+                shutil.rmtree(os.path.join(results_dir, item))
+            else:
+                os.remove(os.path.join(results_dir, item))
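
A quick, hypothetical invocation of the helper above; the namedtuple is only a stand-in exposing the single attribute clean() reads.

    # Hypothetical caller: anything with a `directory` attribute will do.
    import collections
    from toolset.utils.cleaner import clean

    FakeResults = collections.namedtuple('FakeResults', ['directory'])
    # directory points at results/<timestamp>; clean() wipes everything under results/
    clean(FakeResults(directory='/tmp/FrameworkBenchmarks/results/20180101000000'))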

+ 201 - 0
toolset/utils/docker_helper.py

@@ -0,0 +1,201 @@
+import os
+import socket
+import fnmatch
+import subprocess
+import multiprocessing
+import json
+import docker
+
+from threading import Thread
+
+from toolset.utils import setup_util
+from toolset.utils.output_helper import tee_output
+from toolset.utils.metadata_helper import gather_tests
+
+
+def clean():
+    '''
+    Cleans all the docker images from the system
+    '''
+    subprocess.check_call(["docker", "image", "prune", "-f"])
+
+    docker_ids = subprocess.check_output(["docker", "images",
+                                          "-q"]).splitlines()
+    for docker_id in docker_ids:
+        subprocess.check_call(["docker", "image", "rmi", "-f", docker_id])
+
+    subprocess.check_call(["docker", "system", "prune", "-a", "-f"])
+
+
+def build(benchmarker_config, test_names, out):
+    '''
+    Builds the dependency chain as well as the test implementation docker images
+    for the given tests.
+    '''
+    tests = gather_tests(test_names)
+
+    for test in tests:
+        docker_buildargs = {
+            'CPU_COUNT': str(multiprocessing.cpu_count()),
+            'MAX_CONCURRENCY': str(max(benchmarker_config.concurrency_levels)),
+            'TFB_DATABASE': str(benchmarker_config.database_host)
+        }
+
+        test_docker_files = ["%s.dockerfile" % test.name]
+        if test.docker_files is not None:
+            if type(test.docker_files) is list:
+                test_docker_files.extend(test.docker_files)
+            else:
+                raise Exception(
+                    "docker_files in benchmark_config.json must be an array")
+
+        for test_docker_file in test_docker_files:
+            deps = list(
+                reversed(
+                    gather_dependencies(
+                        os.path.join(test.directory, test_docker_file))))
+
+            docker_dir = os.path.join(setup_util.get_fwroot(), "toolset",
+                                      "setup", "docker")
+            for dependency in deps:
+                docker_file = os.path.join(test.directory,
+                                           dependency + ".dockerfile")
+                if not docker_file or not os.path.exists(docker_file):
+                    docker_file = find(docker_dir, dependency + ".dockerfile")
+                if not docker_file:
+                    tee_output(
+                        out,
+                        "Docker build failed; %s could not be found; terminating\n"
+                        % (dependency + ".dockerfile"))
+                    return 1
+
+                # Build the dependency image
+                try:
+                    for line in docker.APIClient(
+                            base_url='unix://var/run/docker.sock').build(
+                                path=os.path.dirname(docker_file),
+                                dockerfile="%s.dockerfile" % dependency,
+                                tag="tfb/%s" % dependency,
+                                buildargs=docker_buildargs,
+                                forcerm=True):
+                        prev_line = os.linesep
+                        if line.startswith('{"stream":'):
+                            line = json.loads(line)
+                            line = line[line.keys()[0]].encode('utf-8')
+                            if prev_line.endswith(os.linesep):
+                                tee_output(out, line)
+                            else:
+                                tee_output(out, line)
+                            prev_line = line
+                except Exception as e:
+                    tee_output(out,
+                               "Docker dependency build failed; terminating\n")
+                    print(e)
+                    return 1
+
+        # Build the test images
+        for test_docker_file in test_docker_files:
+            try:
+                for line in docker.APIClient(
+                        base_url='unix://var/run/docker.sock').build(
+                            path=test.directory,
+                            dockerfile=test_docker_file,
+                            tag="tfb/test/%s" % test_docker_file.replace(
+                                ".dockerfile", ""),
+                            buildargs=docker_buildargs,
+                            forcerm=True):
+                    prev_line = os.linesep
+                    if line.startswith('{"stream":'):
+                        line = json.loads(line)
+                        line = line[line.keys()[0]].encode('utf-8')
+                        if prev_line.endswith(os.linesep):
+                            tee_output(out, line)
+                        else:
+                            tee_output(out, line)
+                        prev_line = line
+            except Exception as e:
+                tee_output(out, "Docker build failed; terminating\n")
+                print(e)
+                return 1
+
+
+def run(benchmarker_config, docker_files, out):
+    '''
+    Run the given Docker container(s)
+    '''
+    client = docker.from_env()
+
+    for docker_file in docker_files:
+        try:
+
+            def watch_container(container):
+                for line in container.logs(stream=True):
+                    tee_output(out, line)
+
+            extra_hosts = {
+                socket.gethostname(): str(benchmarker_config.server_host),
+                'TFB-SERVER': str(benchmarker_config.server_host),
+                'TFB-DATABASE': str(benchmarker_config.database_host),
+                'TFB-CLIENT': str(benchmarker_config.client_host)
+            }
+
+            container = client.containers.run(
+                "tfb/test/%s" % docker_file.replace(".dockerfile", ""),
+                network_mode="host",
+                privileged=True,
+                stderr=True,
+                detach=True,
+                extra_hosts=extra_hosts)
+
+            watch_thread = Thread(target=watch_container, args=(container, ))
+            watch_thread.daemon = True
+            watch_thread.start()
+
+        except Exception as e:
+            tee_output(out,
+                       "Running docker container %s failed" % docker_file)
+            print(e)
+            return 1
+
+
+def find(path, pattern):
+    '''
+    Finds and returns the path of the first file matching the given pattern,
+    searching recursively beneath the given path.
+    '''
+    for root, dirs, files in os.walk(path):
+        for name in files:
+            if fnmatch.fnmatch(name, pattern):
+                return os.path.join(root, name)
+
+
+def gather_dependencies(docker_file):
+    '''
+    Gathers all the known docker dependencies for the given docker image.
+    '''
+    # Avoid setting up a circular import
+    from toolset.utils import setup_util
+    deps = []
+
+    docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup",
+                              "docker")
+
+    if os.path.exists(docker_file):
+        with open(docker_file) as fp:
+            for line in fp.readlines():
+                tokens = line.strip().split(' ')
+                if tokens[0] == "FROM":
+                    # This is magic that our base image points to
+                    if tokens[1] != "ubuntu:16.04":
+                        depToken = tokens[1].strip().split(':')[
+                            0].strip().split('/')[1]
+                        deps.append(depToken)
+                        dep_docker_file = os.path.join(
+                            os.path.dirname(docker_file),
+                            depToken + ".dockerfile")
+                        if not os.path.exists(dep_docker_file):
+                            dep_docker_file = find(docker_dir,
+                                                   depToken + ".dockerfile")
+                        deps.extend(gather_dependencies(dep_docker_file))
+
+    return deps
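
To make the FROM-chain walk in gather_dependencies() concrete, here is a hypothetical example; the dockerfile names and the FROM lines noted in the comments are assumptions, not files guaranteed to exist in the tree.

    # Assumed layout (illustrative):
    #   frameworks/Java/gemini/gemini.dockerfile           FROM tfb/java8
    #   toolset/setup/docker/languages/java8.dockerfile    FROM tfb/base
    #   toolset/setup/docker/base.dockerfile               FROM ubuntu:16.04  (recursion stops)
    from toolset.utils import docker_helper

    deps = docker_helper.gather_dependencies(
        'frameworks/Java/gemini/gemini.dockerfile')
    print(deps)                   # ['java8', 'base']
    print(list(reversed(deps)))   # build order used by build(): base first, then java8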

+ 129 - 0
toolset/utils/initializer.py

@@ -0,0 +1,129 @@
+import subprocess, os
+from toolset.utils import setup_util
+
+DEVNULL = open(os.devnull, 'w')
+
+
+def initialize(args):
+    fwroot = setup_util.get_fwroot()
+    dbuser = args.database_user
+    dbhost = args.database_host
+    dbiden = args.database_identity_file
+    cluser = args.client_user
+    clhost = args.client_host
+    cliden = args.client_identity_file
+    aphost = args.server_host
+
+    # test ssh connections to all the machines
+    client_conn = __check_connection(cluser, clhost, cliden, aphost)
+    database_conn = __check_connection(dbuser, dbhost, dbiden, aphost)
+
+    conn_success = client_conn and database_conn
+    if not conn_success and not args.quiet:
+        return __print_failure()
+
+    # set up client machine
+    if not __init_client(fwroot, cluser, clhost, cliden,
+                         args.quiet) and not args.quiet:
+        return __print_failure()
+
+    # set up database software
+    if not __init_database(fwroot, dbuser, dbhost, dbiden,
+                           args.quiet) and not args.quiet:
+        return __print_failure()
+
+
+def __print_failure():
+    print("""
+-------------------------------------------------------------------------------
+  This wizard is intended to help configure the required software on all the
+  machines in the ecosystem specified in benchmark.cfg.
+
+  Note: It is expected that you have already set up passwordless-sudo on all
+  of the machines (app, database, client) as well as identity file based 
+  authentication and hostname setup in your hosts file. 
+  More information on this required setup can be found at:
+
+  frameworkbenchmarks.readthedocs.io/en/latest/Development/Installation-Guide/
+
+  Please ensure that your benchmark.cfg is correctly configured as well as all
+  of the machines (app, database, client).
+-------------------------------------------------------------------------------"""
+          )
+
+
+def __ssh_string(user, host, identity_file):
+    return [
+        "ssh", "-T", "-o", "StrictHostKeyChecking=no",
+        "%s@%s" % (user, host), "-i", identity_file
+    ]
+
+
+def __check_connection(user, host, identity_file, app_host):
+    ''' 
+  Checks that the given user and host are accessible via ssh with the given
+  identity file and have the following permissions:
+    1. passwordless sudo
+    2. ability to ssh back to app machine
+  '''
+    client_conn = True
+    try:
+        p = subprocess.Popen(
+            __ssh_string(user, host, identity_file),
+            stdin=subprocess.PIPE,
+            stdout=DEVNULL,
+            stderr=DEVNULL)
+        p.communicate("ssh -T -o StrictHostKeyChecking=no %s" % app_host)
+        if p.returncode:
+            client_conn = False
+    except Exception:
+        client_conn = False
+    return client_conn
+
+
+def __init_client(fwroot, user, host, identity_file, quiet):
+    '''
+  Initializes and configures the software required to run the suite on the 
+  client machine.
+  '''
+    if not quiet:
+        print("INSTALL: Installing client software")
+    with open(
+            os.path.join(fwroot, "toolset", "setup", "linux", "client.sh"),
+            "r") as myfile:
+        remote_script = myfile.read()
+        if quiet:
+            p = subprocess.Popen(
+                __ssh_string(user, host, identity_file),
+                stdin=subprocess.PIPE,
+                stdout=DEVNULL,
+                stderr=DEVNULL)
+        else:
+            p = subprocess.Popen(
+                __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
+        p.communicate(remote_script)
+        return p.returncode == 0
+
+
+def __init_database(fwroot, user, host, identity_file, quiet):
+    '''
+  Initializes and configures the software required to run the suite on the
+  database machine.
+  '''
+    if not quiet:
+        print("INSTALL: Installing database software")
+    with open(
+            os.path.join(fwroot, "toolset", "setup", "linux", "database.sh"),
+            "r") as myfile:
+        remote_script = myfile.read()
+        if quiet:
+            p = subprocess.Popen(
+                __ssh_string(user, host, identity_file),
+                stdin=subprocess.PIPE,
+                stdout=DEVNULL,
+                stderr=DEVNULL)
+        else:
+            p = subprocess.Popen(
+                __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
+        p.communicate(remote_script)
+        return p.returncode == 0
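
For reference, the ssh command list that the connection checks and installers above are built on; the user, host, and key path are placeholders.

    # Placeholder values; this mirrors the list __ssh_string() assembles.
    ssh_cmd = [
        "ssh", "-T", "-o", "StrictHostKeyChecking=no",
        "techempower@TFB-client", "-i", "/home/techempower/.ssh/id_rsa"
    ]
    # __check_connection() runs this command and feeds
    # "ssh -T -o StrictHostKeyChecking=no TFB-server" to its stdin,
    # verifying the client machine can reach back to the app server.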

+ 418 - 0
toolset/utils/metadata_helper.py

@@ -0,0 +1,418 @@
+import ConfigParser
+import os
+import glob
+import json
+import logging
+
+from ast import literal_eval
+from collections import OrderedDict
+
+
+def gather_langauges():
+    '''
+    Gathers all the known languages in the suite via the folder names
+    beneath FWROOT.
+    '''
+    # Avoid setting up a circular import
+    from toolset.utils import setup_util
+
+    lang_dir = os.path.join(setup_util.get_fwroot(), "frameworks")
+    langs = []
+    for dir in glob.glob(os.path.join(lang_dir, "*")):
+        langs.append(dir.replace(lang_dir, "")[1:])
+    return langs
+
+
+def gather_tests(include=[], exclude=[], benchmarker_config=None,
+                 results=None):
+    '''
+    Given test names as strings, returns a list of FrameworkTest objects.
+    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
+    variables for checking the test directory, the test database os, and
+    other useful items.
+
+    With no arguments, every test in this framework will be returned.
+    With include, only tests with this exact name will be returned.
+    With exclude, all tests but those excluded will be returned.
+
+    A config is needed to construct full FrameworkTest objects. If
+    one is not provided, a default config will be created.
+    '''
+    # Avoid setting up a circular import
+    from toolset.utils.benchmark_config import BenchmarkConfig
+    from toolset.utils import setup_util
+
+    # Help callers out a bit
+    if include is None:
+        include = []
+    if exclude is None:
+        exclude = []
+
+    # Old, hacky method to exclude all tests was to
+    # request a test known to not exist, such as ''.
+    # If test '' was requested, short-circuit and return
+    # nothing immediately
+    if len(include) == 1 and '' in include:
+        return []
+
+    # Setup default BenchmarkerConfig using example configuration
+    if benchmarker_config is None:
+        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
+        config = ConfigParser.SafeConfigParser()
+        config.readfp(open(default_config))
+        defaults = dict(config.items("Defaults"))
+
+        # Convert strings into proper python types
+        for k, v in defaults.items():
+            try:
+                defaults[k] = literal_eval(v)
+            except Exception:
+                pass
+
+        defaults[
+            'results_name'] = "(unspecified, datetime = %Y-%m-%d %H:%M:%S)"
+        defaults['results_environment'] = "My Server Environment"
+        defaults['test_dir'] = None
+        defaults['test_lang'] = None
+        defaults['quiet'] = True
+
+        benchmarker_config = BenchmarkConfig(defaults)
+
+    # Search for configuration files
+    config_files = []
+
+    if benchmarker_config.test_lang:
+        benchmarker_config.test_dir = []
+        for lang in benchmarker_config.test_lang:
+            if os.path.exists("{!s}/frameworks/{!s}".format(
+                    benchmarker_config.fwroot, lang)):
+                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
+                        benchmarker_config.fwroot, lang)):
+                    benchmarker_config.test_dir.append("{!s}/{!s}".format(
+                        lang, test_dir))
+            else:
+                raise Exception(
+                    "Unable to locate language directory: {!s}".format(lang))
+
+    if benchmarker_config.test_dir:
+        for test_dir in benchmarker_config.test_dir:
+            dir_config_files = glob.glob(
+                "{!s}/frameworks/{!s}/benchmark_config.json".format(
+                    benchmarker_config.fwroot, test_dir))
+            if len(dir_config_files):
+                config_files.extend(dir_config_files)
+            else:
+                raise Exception(
+                    "Unable to locate tests in test-dir: {!s}".format(
+                        test_dir))
+    else:
+        config_files.extend(
+            glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(
+                benchmarker_config.fwroot)))
+
+    tests = []
+    for config_file_name in config_files:
+        config = None
+        with open(config_file_name, 'r') as config_file:
+            try:
+                config = json.load(config_file)
+            except ValueError:
+                raise Exception(
+                    "Error loading '{!s}'.".format(config_file_name))
+
+        # Find all tests in the config file
+        config_tests = parse_config(config, os.path.dirname(config_file_name),
+                                    benchmarker_config, results)
+
+        # Filter
+        for test in config_tests:
+            if len(include) is 0 and len(exclude) is 0:
+                # No filters, we are running everything
+                tests.append(test)
+            elif test.name in exclude:
+                continue
+            elif test.name in include:
+                tests.append(test)
+            else:
+                # An include list exists, but this test is
+                # not listed there, so we ignore it
+                pass
+
+    # Ensure we were able to locate everything that was
+    # explicitly included
+    if 0 != len(include):
+        names = {test.name for test in tests}
+        if 0 != len(set(include) - set(names)):
+            missing = list(set(include) - set(names))
+            raise Exception("Unable to locate tests %s" % missing)
+
+    tests.sort(key=lambda x: x.name)
+    return tests
+
+
+def gather_remaining_tests(config, results):
+    '''
+    Gathers the tests remaining in a current benchmark run.
+    '''
+    tests = gather_tests(config.test, config.exclude, config, results)
+
+    # If the tests have been interrupted somehow, then we want to resume them where we left
+    # off, rather than starting from the beginning
+    if os.path.isfile(config.current_benchmark):
+        with open(config.current_benchmark, 'r') as interrupted_benchmark:
+            interrupt_bench = interrupted_benchmark.read().strip()
+        for index, atest in enumerate(tests):
+            if atest.name == interrupt_bench:
+                tests = tests[index:]
+                break
+    return tests
+
+
+def gather_frameworks(include=[], exclude=[], config=None):
+    '''
+    Return a dictionary mapping frameworks->[test1,test2,test3]
+    for quickly grabbing all tests in a grouped manner.
+    Args have the same meaning as gather_tests
+    '''
+    tests = gather_tests(include, exclude, config)
+    frameworks = dict()
+
+    for test in tests:
+        if test.framework not in frameworks:
+            frameworks[test.framework] = []
+        frameworks[test.framework].append(test)
+    return frameworks
+
+
+def test_order(type_name):
+    """
+    This sort ordering is set up specifically to return the length
+    of the test name. There were SO many problems involved with
+    'plaintext' being run first (rather, just not last) that we
+    needed to ensure that it was run last for every framework.
+    """
+    return len(type_name)
+
+
+def parse_config(config, directory, benchmarker_config, results):
+    """
+    Parses a config file into a list of FrameworkTest objects
+    """
+    from toolset.benchmark.framework_test import FrameworkTest
+    tests = []
+
+    # The config object can specify multiple tests
+    # Loop over them and parse each into a FrameworkTest
+    for test in config['tests']:
+
+        tests_to_run = [name for (name, keys) in test.iteritems()]
+        if "default" not in tests_to_run:
+            logging.warn(
+                "Framework %s does not define a default test in benchmark_config.json",
+                config['framework'])
+
+        # Check that each test configuration is acceptable
+        # Throw exceptions if a field is missing, or how to improve the field
+        for test_name, test_keys in test.iteritems():
+            # Validates the benchmark_config entry
+            validate_test(test_name, test_keys, directory)
+
+            # Map test type to a parsed FrameworkTestType object
+            runTests = dict()
+            for type_name, type_obj in benchmarker_config.types.iteritems():
+                try:
+                    # Makes a FrameWorkTestType object using some of the keys in config
+                    # e.g. JsonTestType uses "json_url"
+                    runTests[type_name] = type_obj.copy().parse(test_keys)
+                except AttributeError:
+                    # This is quite common - most tests don't support all types
+                    # Quietly log it and move on (debug logging is on in travis and this causes
+                    # ~1500 lines of debug, so I'm totally ignoring it for now)
+                    # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
+                    pass
+
+            # We need to sort by test_type to run
+            sortedTestKeys = sorted(runTests.keys(), key=test_order)
+            sortedRunTests = OrderedDict()
+            for sortedTestKey in sortedTestKeys:
+                sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
+
+            # Prefix all test names with framework except 'default' test
+            # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
+            if test_name == 'default':
+                test_name = config['framework']
+            else:
+                test_name = "%s-%s" % (config['framework'], test_name)
+
+            # By passing the entire set of keys, each FrameworkTest will have a member for each key
+            tests.append(
+                FrameworkTest(test_name, directory, benchmarker_config,
+                              results, sortedRunTests, test_keys))
+
+    return tests
+
+
+def validate_test(test_name, test_keys, directory):
+    """
+    Validate benchmark config values for this test based on a schema
+    """
+    recommended_lang = directory.split('/')[-2]
+    windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
+    schema = {
+        'language': {
+            'help':
+            ('language', 'The language of the framework used, suggestion: %s' %
+             recommended_lang)
+        },
+        'webserver': {
+            'help':
+            ('webserver',
+             'Name of the webserver also referred to as the "front-end server"'
+             )
+        },
+        'classification': {
+            'allowed': [('Fullstack', '...'), ('Micro', '...'), ('Platform',
+                                                                 '...')]
+        },
+        'database': {
+            'allowed':
+            [('MySQL',
+              'One of the most popular databases around the web and in TFB'),
+             ('Postgres',
+              'An advanced SQL database with a larger feature set than MySQL'),
+             ('MongoDB', 'A popular document-store database'),
+             ('Cassandra', 'A highly performant and scalable NoSQL database'),
+             ('Elasticsearch',
+              'A distributed RESTful search engine that is used as a database for TFB tests'
+              ),
+             ('Redis',
+              'An open-sourced, BSD licensed, advanced key-value cache and store'
+              ),
+             ('SQLite',
+              'A network-less database, still supported for backwards compatibility'
+              ), ('SQLServer', 'Microsoft\'s SQL implementation'),
+             ('None',
+              'No database was used for these tests, as is the case with Json Serialization and Plaintext'
+              )]
+        },
+        'approach': {
+            'allowed': [('Realistic', '...'), ('Stripped', '...')]
+        },
+        'orm': {
+            'allowed':
+            [('Full',
+              'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'
+              ),
+             ('Micro',
+              'Has basic database driver capabilities such as establishing a connection and sending queries.'
+              ),
+             ('Raw',
+              'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.'
+              )]
+        },
+        'platform': {
+            'help':
+            ('platform',
+             'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...'
+             )
+        },
+        'framework': {
+            # Guaranteed to be here and correct at this point
+            # key is left here to produce the set of required keys
+        },
+        'os': {
+            'allowed':
+            [('Linux',
+              'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
+              ),
+             ('Windows',
+              'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
+              % windows_url)]
+        },
+        'database_os': {
+            'allowed':
+            [('Linux',
+              'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
+              ),
+             ('Windows',
+              'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
+              % windows_url)]
+        }
+    }
+
+    # Confirm required keys are present
+    required_keys = schema.keys()
+    missing = list(set(required_keys) - set(test_keys))
+
+    if len(missing) > 0:
+        missingstr = (", ").join(map(str, missing))
+        raise Exception(
+            "benchmark_config.json for test %s is invalid, please amend by adding the following required keys: [%s]"
+            % (test_name, missingstr))
+
+    # Check the (all optional) test urls
+    validate_urls(test_name, test_keys)
+
+    # Check values of keys against schema
+    for key in required_keys:
+        val = test_keys.get(key, "").lower()
+        has_predefined_acceptables = 'allowed' in schema[key]
+
+        if has_predefined_acceptables:
+            allowed = schema[key].get('allowed', [])
+            acceptable_values, descriptors = zip(*allowed)
+            acceptable_values = [a.lower() for a in acceptable_values]
+
+            if val not in acceptable_values:
+                msg = (
+                    "Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
+                    % (key, test_name, test_keys['framework']))
+                helpinfo = ('\n').join([
+                    "  `%s` -- %s" % (v, desc)
+                    for (v, desc) in zip(acceptable_values, descriptors)
+                ])
+                fullerr = msg + helpinfo + "\n"
+                raise Exception(fullerr)
+
+        elif not has_predefined_acceptables and val == "":
+            msg = (
+                "Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n"
+                % (key, test_name, test_keys['framework']))
+            helpinfo = "  %s -- %s" % schema[key]['help']
+            fullerr = msg + helpinfo + '\n'
+            raise Exception(fullerr)
+
+
+def validate_urls(test_name, test_keys):
+    """
+    Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
+    empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
+    the suggested url specifications, although those suggestions are presented if a url fails validation here.
+    """
+    example_urls = {
+        "json_url":
+        "/json",
+        "db_url":
+        "/mysql/db",
+        "query_url":
+        "/mysql/queries?queries=  or  /mysql/queries/",
+        "fortune_url":
+        "/mysql/fortunes",
+        "update_url":
+        "/mysql/updates?queries=  or  /mysql/updates/",
+        "plaintext_url":
+        "/plaintext",
+        "cached_query_url":
+        "/mysql/cached_queries?queries=  or /mysql/cached_queries"
+    }
+
+    for test_url in [
+            "json_url", "db_url", "query_url", "fortune_url", "update_url",
+            "plaintext_url", "cached_query_url"
+    ]:
+        key_value = test_keys.get(test_url, None)
+        if key_value != None and not key_value.startswith('/'):
+            errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
+        Example `%s` url: \"%s\"
+      """ % (test_url, test_name, key_value, test_url, example_urls[test_url])
+            raise Exception(errmsg)
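
As a concrete illustration of what validate_test() and validate_urls() accept, a minimal test entry expressed as a Python dict; the framework name, directory, and field values are illustrative only.

    # Illustrative test entry; 'framework' matches the enclosing benchmark_config.json.
    from toolset.utils.metadata_helper import validate_test, validate_urls

    test_keys = {
        'framework': 'gemini', 'language': 'Java', 'platform': 'Servlet',
        'webserver': 'Resin', 'classification': 'Fullstack',
        'database': 'MySQL', 'approach': 'Realistic', 'orm': 'Micro',
        'os': 'Linux', 'database_os': 'Linux',
        'json_url': '/json', 'db_url': '/db',
    }
    validate_test('default', test_keys, 'frameworks/Java/gemini')  # raises on a bad value
    validate_urls('default', test_keys)  # raises if a url does not start with '/'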

+ 65 - 0
toolset/utils/output_helper.py

@@ -0,0 +1,65 @@
+import os, sys
+from contextlib import contextmanager
+
+
+def header(message, top='-', bottom='-'):
+    '''
+    Generates a clean header
+    '''
+    topheader = (top * 80)[:80]
+    bottomheader = (bottom * 80)[:80]
+    result = ""
+    if topheader != "":
+        result += "%s" % topheader
+    if message != "":
+        if result == "":
+            result = "  %s" % message
+        else:
+            result += "%s  %s" % (os.linesep, message)
+    if bottomheader != "":
+        if result == "":
+            result = "%s" % bottomheader
+        else:
+            result += "%s%s" % (os.linesep, bottomheader)
+    return result + os.linesep
+
+
+def tee_output(out, line):
+    '''
+    Writes to both stdout and the provided out file
+    '''
+    sys.stdout.write(line)
+    sys.stdout.flush()
+
+    if out is not None:
+        out.write(line)
+        out.flush()
+
+
+class QuietOutputStream:
+    def __init__(self, is_quiet):
+        self.is_quiet = is_quiet
+        self.null_out = open(os.devnull, 'w')
+
+    def fileno(self):
+        with self.enable():
+            return sys.stdout.fileno()
+
+    def write(self, message):
+        with self.enable():
+            sys.stdout.write(message)
+
+    @contextmanager
+    def enable(self):
+        if self.is_quiet:
+            old_out = sys.stdout
+            old_err = sys.stderr
+            try:
+                sys.stdout = self.null_out
+                sys.stderr = self.null_out
+                yield
+            finally:
+                sys.stdout = old_out
+                sys.stderr = old_err
+        else:
+            yield
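
A small usage sketch of the helpers above; the log path is arbitrary.

    # Tee a header and a line to stdout plus a log file, then show quiet mode.
    from toolset.utils.output_helper import header, tee_output, QuietOutputStream

    with open('/tmp/example-build.log', 'w') as out:
        tee_output(out, header("Building tfb/test/example"))
        tee_output(out, "step 1 complete\n")

    quiet_out = QuietOutputStream(True)
    quiet_out.write("suppressed when quiet is enabled\n")  # swallowed by /dev/null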

+ 211 - 0
toolset/utils/remote_script_helper.py

@@ -0,0 +1,211 @@
+def generate_concurrency_script(benchmarker_config,
+                                name,
+                                url,
+                                port,
+                                accept_header,
+                                wrk_command="wrk"):
+    '''
+    Generates the string containing the bash script that will be run on the 
+    client to benchmark a single test. This specifically works for the variable 
+    concurrency tests.
+    '''
+    headers = headers_template.format(
+        server_host=benchmarker_config.server_host, accept=accept_header)
+    return concurrency_template.format(
+        max_concurrency=max(benchmarker_config.concurrency_levels),
+        name=name,
+        duration=benchmarker_config.duration,
+        levels=" ".join("{}".format(item)
+                        for item in benchmarker_config.concurrency_levels),
+        server_host=benchmarker_config.server_host,
+        port=port,
+        url=url,
+        headers=headers,
+        wrk=wrk_command)
+
+
+def generate_pipeline_script(benchmarker_config,
+                             name,
+                             url,
+                             port,
+                             accept_header,
+                             wrk_command="wrk"):
+    '''
+    Generates the string containing the bash script that will be run on the 
+    client to benchmark a single pipeline test.
+    '''
+    headers = headers_template.format(
+        server_host=benchmarker_config.server_host, accept=accept_header)
+    return pipeline_template.format(
+        max_concurrency=max(benchmarker_config.pipeline_concurrency_levels),
+        name=name,
+        duration=benchmarker_config.duration,
+        levels=" ".join(
+            "{}".format(item)
+            for item in benchmarker_config.pipeline_concurrency_levels),
+        server_host=benchmarker_config.server_host,
+        port=port,
+        url=url,
+        headers=headers,
+        wrk=wrk_command,
+        pipeline=16)
+
+
+def generate_query_script(benchmarker_config, name, url, port, accept_header,
+                          query_levels):
+    '''
+    Generates the string containing the bash script that will be run on the 
+    client to benchmark a single test. This specifically works for the variable 
+    query tests (Query)
+    '''
+    headers = headers_template.format(
+        server_host=benchmarker_config.server_host, accept=accept_header)
+    return query_template.format(
+        max_concurrency=max(benchmarker_config.concurrency_levels),
+        name=name,
+        duration=benchmarker_config.duration,
+        levels=" ".join("{}".format(item) for item in query_levels),
+        server_host=benchmarker_config.server_host,
+        port=port,
+        url=url,
+        headers=headers)
+
+
+##########################################################################################
+# Constants
+##########################################################################################
+headers_template = "-H 'Host: {server_host}' -H 'Accept: {accept}' -H 'Connection: keep-alive'"
+
+# Used for test types that require no pipelining or query string params.
+concurrency_template = """
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Primer {name}"
+    echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
+    echo "---------------------------------------------------------"
+    echo ""
+    {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Warmup {name}"
+    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
+    echo "---------------------------------------------------------"
+    echo ""
+    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Synchronizing time"
+    echo "---------------------------------------------------------"
+    echo ""
+    ntpdate -s pool.ntp.org
+
+    for c in {levels}
+    do
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Concurrency: $c for {name}"
+    echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\""
+    echo "---------------------------------------------------------"
+    echo ""
+    STARTTIME=$(date +"%s")
+    {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url}
+    echo "STARTTIME $STARTTIME"
+    echo "ENDTIME $(date +"%s")"
+    sleep 2
+    done
+"""
+
+# Used for test types that require pipelining.
+pipeline_template = """
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Primer {name}"
+    echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
+    echo "---------------------------------------------------------"
+    echo ""
+    {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Warmup {name}"
+    echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}\""
+    echo "---------------------------------------------------------"
+    echo ""
+    {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Synchronizing time"
+    echo "---------------------------------------------------------"
+    echo ""
+    ntpdate -s pool.ntp.org
+
+    for c in {levels}
+    do
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Concurrency: $c for {name}"
+    echo " {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t $(($c>$max_threads?$max_threads:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
+    echo "---------------------------------------------------------"
+    echo ""
+    STARTTIME=$(date +"%s")
+    {wrk} {headers} --latency -d {duration} -c $c --timeout 8 -t "$(($c>$max_threads?$max_threads:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
+    echo "STARTTIME $STARTTIME"
+    echo "ENDTIME $(date +"%s")"
+    sleep 2
+    done
+"""
+
+# Used for test types that require a database -
+# These tests run at a static concurrency level and vary the size of
+# the query sent with each request
+query_template = """
+    let max_threads=$(cat /proc/cpuinfo | grep processor | wc -l)
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Primer {name}"
+    echo " wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
+    echo "---------------------------------------------------------"
+    echo ""
+    wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Running Warmup {name}"
+    echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}2\""
+    echo "---------------------------------------------------------"
+    echo ""
+    wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}2"
+    sleep 5
+
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Synchronizing time"
+    echo "---------------------------------------------------------"
+    echo ""
+    ntpdate -s pool.ntp.org
+
+    for c in {levels}
+    do
+    echo ""
+    echo "---------------------------------------------------------"
+    echo " Queries: $c for {name}"
+    echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads \"http://{server_host}:{port}{url}$c\""
+    echo "---------------------------------------------------------"
+    echo ""
+    STARTTIME=$(date +"%s")
+    wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout 8 -t $max_threads "http://{server_host}:{port}{url}$c"
+    echo "STARTTIME $STARTTIME"
+    echo "ENDTIME $(date +"%s")"
+    sleep 2
+    done
+"""

+ 596 - 0
toolset/utils/results_helper.py

@@ -0,0 +1,596 @@
+from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
+from toolset.utils.output_helper import header
+
+import os
+import logging
+import subprocess
+import uuid
+import time
+import json
+import requests
+import threading
+import re
+import math
+import csv
+from datetime import datetime
+
+# Cross-platform colored text
+from colorama import Fore, Style
+
+
+class Results:
+    def __init__(self, config):
+        '''
+        Constructor
+        '''
+        self.config = config
+        self.directory = os.path.join(self.config.fwroot, "results",
+                                      self.config.timestamp)
+        try:
+            os.makedirs(self.directory)
+        except OSError:
+            pass
+        self.file = os.path.join(self.directory, "results.json")
+
+        self.uuid = str(uuid.uuid4())
+        self.name = datetime.now().strftime(self.config.results_name)
+        self.environmentDescription = self.config.results_environment
+        try:
+            self.git = dict()
+            self.git['commitId'] = self.__get_git_commit_id()
+            self.git['repositoryUrl'] = self.__get_git_repository_url()
+            self.git['branchName'] = self.__get_git_branch_name()
+        except Exception as e:
+            logging.debug(
+                'Could not read local git repository, which is fine. The error was: %s',
+                e)
+            self.git = None
+        self.startTime = int(round(time.time() * 1000))
+        self.completionTime = None
+        self.concurrencyLevels = self.config.concurrency_levels
+        self.pipelineConcurrencyLevels = self.config.pipeline_concurrency_levels
+        self.queryIntervals = self.config.query_levels
+        self.cachedQueryIntervals = self.config.cached_query_levels
+        self.frameworks = [
+            t.name for t in gather_remaining_tests(self.config, self)
+        ]
+        self.duration = self.config.duration
+        self.rawData = dict()
+        self.rawData['json'] = dict()
+        self.rawData['db'] = dict()
+        self.rawData['query'] = dict()
+        self.rawData['fortune'] = dict()
+        self.rawData['update'] = dict()
+        self.rawData['plaintext'] = dict()
+        self.rawData['cached_query'] = dict()
+        self.completed = dict()
+        self.succeeded = dict()
+        self.succeeded['json'] = []
+        self.succeeded['db'] = []
+        self.succeeded['query'] = []
+        self.succeeded['fortune'] = []
+        self.succeeded['update'] = []
+        self.succeeded['plaintext'] = []
+        self.succeeded['cached_query'] = []
+        self.failed = dict()
+        self.failed['json'] = []
+        self.failed['db'] = []
+        self.failed['query'] = []
+        self.failed['fortune'] = []
+        self.failed['update'] = []
+        self.failed['plaintext'] = []
+        self.failed['cached_query'] = []
+        self.verify = dict()
+
+        try:
+            with open(os.path.join(self.directory, 'results.json'), 'r') as f:
+                # Load json file into results object
+                self.__dict__.update(json.load(f))
+        except IOError:
+            logging.warn("results.json for test not found.")
+
+    #############################################################################
+    # PUBLIC FUNCTIONS
+    #############################################################################
+
+    def parse(self, tests):
+        '''
+        Gathers additional metadata for the benchmarked tests (git commit
+        counts and significant lines of code per framework) and writes
+        the aggregated results.json file.
+        '''
+        # Run the method to get the commit count of each framework.
+        self.__count_commits()
+        # Call the method which counts the sloc for each framework
+        self.__count_sloc()
+
+        # Time to create parsed files
+        # Aggregate JSON file
+        with open(self.file, "w") as f:
+            f.write(json.dumps(self.__to_jsonable(), indent=2))
+
+    def parse_test(self, framework_test, test_type):
+        '''
+        Parses the given test and test_type from the raw_file.
+        '''
+        try:
+            results = dict()
+            results['results'] = []
+            stats = []
+
+            if os.path.exists(
+                    self.get_raw_file(framework_test.name, test_type)):
+                with open(self.get_raw_file(framework_test.name,
+                                            test_type)) as raw_data:
+
+                    is_warmup = True
+                    rawData = None
+                    for line in raw_data:
+                        if "Queries:" in line or "Concurrency:" in line:
+                            is_warmup = False
+                            rawData = None
+                            continue
+                        if "Warmup" in line or "Primer" in line:
+                            is_warmup = True
+                            continue
+                        if not is_warmup:
+                            if rawData == None:
+                                rawData = dict()
+                                results['results'].append(rawData)
+                            if "Latency" in line:
+                                m = re.findall(
+                                    r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
+                                if len(m) == 4:
+                                    rawData['latencyAvg'] = m[0]
+                                    rawData['latencyStdev'] = m[1]
+                                    rawData['latencyMax'] = m[2]
+                            if "requests in" in line:
+                                m = re.search("([0-9]+) requests in", line)
+                                if m != None:
+                                    rawData['totalRequests'] = int(m.group(1))
+                            if "Socket errors" in line:
+                                if "connect" in line:
+                                    m = re.search("connect ([0-9]+)", line)
+                                    rawData['connect'] = int(m.group(1))
+                                if "read" in line:
+                                    m = re.search("read ([0-9]+)", line)
+                                    rawData['read'] = int(m.group(1))
+                                if "write" in line:
+                                    m = re.search("write ([0-9]+)", line)
+                                    rawData['write'] = int(m.group(1))
+                                if "timeout" in line:
+                                    m = re.search("timeout ([0-9]+)", line)
+                                    rawData['timeout'] = int(m.group(1))
+                            if "Non-2xx" in line:
+                                m = re.search(
+                                    "Non-2xx or 3xx responses: ([0-9]+)", line)
+                                if m != None:
+                                    rawData['5xx'] = int(m.group(1))
+                            if "STARTTIME" in line:
+                                m = re.search("[0-9]+", line)
+                                rawData["startTime"] = int(m.group(0))
+                            if "ENDTIME" in line:
+                                m = re.search("[0-9]+", line)
+                                rawData["endTime"] = int(m.group(0))
+                                test_stats = self.__parse_stats(
+                                    framework_test, test_type,
+                                    rawData["startTime"], rawData["endTime"],
+                                    1)
+                                stats.append(test_stats)
+            with open(
+                    self.get_stats_file(framework_test.name, test_type) +
+                    ".json", "w") as stats_file:
+                json.dump(stats, stats_file, indent=2)
+
+            return results
+        except IOError:
+            return None
+
+    def parse_all(self, framework_test):
+        '''
+        Method meant to be run for a given timestamp
+        '''
+        for test_type in framework_test.runTests:
+            if os.path.exists(
+                    self.get_raw_file(framework_test.name, test_type)):
+                results = self.parse_test(framework_test, test_type)
+                self.report_benchmark_results(framework_test, test_type,
+                                              results['results'])
+
+    def write_intermediate(self, test_name, status_message):
+        '''
+        Writes the intermediate results for the given test_name and status_message
+        '''
+        self.completed[test_name] = status_message
+        self.__write_results()
+
+    def set_completion_time(self):
+        '''
+        Sets the completionTime for these results and writes the results
+        '''
+        self.completionTime = int(round(time.time() * 1000))
+        self.__write_results()
+
+    def upload(self):
+        '''
+        Attempts to upload the results.json to the configured results_upload_uri
+        '''
+        if self.config.results_upload_uri != None:
+            try:
+                requests.post(
+                    self.config.results_upload_uri,
+                    headers={'Content-Type': 'application/json'},
+                    data=json.dumps(self, indent=2))
+            except (Exception):
+                logging.error("Error uploading results.json")
+
+    def load(self):
+        '''
+        Load the results.json file
+        '''
+        try:
+            with open(self.file) as f:
+                self.__dict__.update(json.load(f))
+        except (ValueError, IOError):
+            pass
+
+    def get_raw_file(self, test_name, test_type):
+        '''
+        Returns the output file for this test_name and test_type
+        Example: fwroot/results/timestamp/test_name/test_type/raw.txt
+        '''
+        path = os.path.join(self.directory, test_name, test_type, "raw.txt")
+        try:
+            os.makedirs(os.path.dirname(path))
+        except OSError:
+            pass
+        return path
+
+    def get_stats_file(self, test_name, test_type):
+        '''
+        Returns the stats file name for this test_name and test_type
+        Example: fwroot/results/timestamp/test_name/test_type/stats.txt
+        '''
+        path = os.path.join(self.directory, test_name, test_type, "stats.txt")
+        try:
+            os.makedirs(os.path.dirname(path))
+        except OSError:
+            pass
+        return path
+
+    def report_verify_results(self, framework_test, test_type, result):
+        '''
+        Used by FrameworkTest to add verification details to our results
+        
+        TODO: Technically this is an IPC violation - we are accessing
+        the parent process' memory from the child process
+        '''
+        if framework_test.name not in self.verify.keys():
+            self.verify[framework_test.name] = dict()
+        self.verify[framework_test.name][test_type] = result
+
+    def report_benchmark_results(self, framework_test, test_type, results):
+        '''
+        Used by FrameworkTest to add benchmark data to this
+        
+        TODO: Technically this is an IPC violation - we are accessing
+        the parent process' memory from the child process
+        '''
+        if test_type not in self.rawData.keys():
+            self.rawData[test_type] = dict()
+
+        # If results has a size from the parse, then it succeeded.
+        if results:
+            self.rawData[test_type][framework_test.name] = results
+
+            # This may already be set for single-tests
+            if framework_test.name not in self.succeeded[test_type]:
+                self.succeeded[test_type].append(framework_test.name)
+        else:
+            # This may already be set for single-tests
+            if framework_test.name not in self.failed[test_type]:
+                self.failed[test_type].append(framework_test.name)
+
+    def finish(self):
+        '''
+        Finishes these results.
+        '''
+        if not self.config.parse:
+            tests = gather_remaining_tests(self.config, self)
+            # Normally you don't have to print the color code before each line, but
+            # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
+            # or stream flush, so we have to ensure that the color code is printed repeatedly
+            prefix = Fore.CYAN
+            for line in header(
+                    "Verification Summary", top='=', bottom='').split('\n'):
+                print(prefix + line)
+            for test in tests:
+                print(prefix + "| Test: {!s}".format(test.name))
+                if test.name in self.verify.keys():
+                    for test_type, result in self.verify[
+                            test.name].iteritems():
+                        if result.upper() == "PASS":
+                            color = Fore.GREEN
+                        elif result.upper() == "WARN":
+                            color = Fore.YELLOW
+                        else:
+                            color = Fore.RED
+                        print(prefix + "|       " + test_type.ljust(13) +
+                              ' : ' + color + result.upper())
+                else:
+                    print(prefix + "|      " + Fore.RED +
+                          "NO RESULTS (Did framework launch?)")
+            print(prefix + header('', top='', bottom='=') + Style.RESET_ALL)
+
+        print("Time to complete: " +
+              str(int(time.time() - self.config.start_time)) + " seconds")
+        print("Results are saved in " + self.directory)
+
+    #############################################################################
+    # PRIVATE FUNCTIONS
+    #############################################################################
+
+    def __to_jsonable(self):
+        '''
+        Returns a dict suitable for jsonification
+        '''
+        toRet = dict()
+
+        toRet['uuid'] = self.uuid
+        toRet['name'] = self.name
+        toRet['environmentDescription'] = self.environmentDescription
+        toRet['git'] = self.git
+        toRet['startTime'] = self.startTime
+        toRet['completionTime'] = self.completionTime
+        toRet['concurrencyLevels'] = self.concurrencyLevels
+        toRet['pipelineConcurrencyLevels'] = self.pipelineConcurrencyLevels
+        toRet['queryIntervals'] = self.queryIntervals
+        toRet['cachedQueryIntervals'] = self.cachedQueryIntervals
+        toRet['frameworks'] = self.frameworks
+        toRet['duration'] = self.duration
+        toRet['rawData'] = self.rawData
+        toRet['completed'] = self.completed
+        toRet['succeeded'] = self.succeeded
+        toRet['failed'] = self.failed
+        toRet['verify'] = self.verify
+
+        return toRet
+
+    def __write_results(self):
+        try:
+            with open(self.file, 'w') as f:
+                f.write(json.dumps(self.__to_jsonable(), indent=2))
+        except IOError:
+            logging.error("Error writing results.json")
+
+    def __count_sloc(self):
+        '''
+        Counts the significant lines of code for all tests and stores in results.
+        '''
+        frameworks = gather_frameworks(self.config.test, self.config.exclude,
+                                       self.config)
+
+        jsonResult = {}
+        for framework, testlist in frameworks.items():
+            if not os.path.exists(
+                    os.path.join(testlist[0].directory, "source_code")):
+                logging.warn(
+                    "Cannot count lines of code for %s - no 'source_code' file",
+                    framework)
+                continue
+
+            # Unfortunately the source_code files use lines like
+            # ./cpoll_cppsp/www/fortune_old instead of
+            # ./www/fortune_old
+            # so we have to back our working dir up one level
+            wd = os.path.dirname(testlist[0].directory)
+
+            try:
+                command = "cloc --list-file=%s/source_code --yaml" % testlist[
+                    0].directory
+
+                if os.path.exists(
+                        os.path.join(testlist[0].directory, "cloc_defs.txt")):
+                    command += " --read-lang-def %s" % os.path.join(
+                        testlist[0].directory, "cloc_defs.txt")
+                    logging.info("Using custom cloc definitions for %s",
+                                 framework)
+
+                # Find the last instance of the word 'code' in the yaml output. This should
+                # be the line count for the sum of all listed files or just the line count
+                # for the last file in the case where there's only one file listed.
+                command = command + "| grep code | tail -1 | cut -d: -f 2"
+                logging.debug("Running \"%s\" (cwd=%s)", command, wd)
+                lineCount = subprocess.check_output(
+                    command, cwd=wd, shell=True)
+                jsonResult[framework] = int(lineCount)
+            except subprocess.CalledProcessError:
+                continue
+            except ValueError as ve:
+                logging.warn(
+                    "Unable to get linecount for %s due to error '%s'",
+                    framework, ve)
+        self.rawData['slocCounts'] = jsonResult
+
+    def __count_commits(self):
+        '''
+        Count the git commits for all the framework tests
+        '''
+        frameworks = gather_frameworks(self.config.test, self.config.exclude,
+                                       self.config)
+
+        def count_commit(framework, directory, jsonResult):
+            command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
+            try:
+                commitCount = subprocess.check_output(command, shell=True)
+                jsonResult[framework] = int(commitCount)
+            except subprocess.CalledProcessError:
+                pass
+
+        # Because git can be slow when run in large batches, this
+        # calls git up to 4 times in parallel. Normal improvement is ~3-4x
+        # in my trials, or ~100 seconds down to ~25
+        # This is safe to parallelize as long as each thread only
+        # accesses one key in the dictionary
+        threads = []
+        jsonResult = {}
+        # t1 = datetime.now()
+        for framework, testlist in frameworks.items():
+            directory = testlist[0].directory
+            t = threading.Thread(
+                target=count_commit, args=(framework, directory, jsonResult))
+            t.start()
+            threads.append(t)
+            # Git has internal locks, full parallel will just cause contention
+            # and slowness, so we rate-limit a bit
+            if len(threads) >= 4:
+                threads[0].join()
+                threads.remove(threads[0])
+
+        # Wait for remaining threads
+        for t in threads:
+            t.join()
+        # t2 = datetime.now()
+        # print "Took %s seconds " % (t2 - t1).seconds
+
+        self.rawData['commitCounts'] = jsonResult
+        self.config.commits = jsonResult
+
+    def __get_git_commit_id(self):
+        '''
+        Get the git commit id for this benchmark
+        '''
+        return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
+
+    def __get_git_repository_url(self):
+        '''
+        Gets the git repository url for this benchmark
+        '''
+        return subprocess.check_output(
+            ["git", "config", "--get", "remote.origin.url"]).strip()
+
+    def __get_git_branch_name(self):
+        '''
+        Gets the git branch name for this benchmark
+        '''
+        return subprocess.check_output(
+            'git rev-parse --abbrev-ref HEAD', shell=True).strip()
+
+    def __parse_stats(self, framework_test, test_type, start_time, end_time,
+                      interval):
+        '''
+        For each test type, process all the statistics, and return a multi-layered 
+        dictionary that has a structure as follows:
+
+        (timestamp)
+        | (main header) - group that the stat is in
+        | | (sub header) - title of the stat
+        | | | (stat) - the stat itself, usually a floating point number
+        '''
+        stats_dict = dict()
+        stats_file = self.get_stats_file(framework_test.name, test_type)
+        with open(stats_file) as stats:
+            # dstat doesn't output a completely compliant CSV file - we need to strip the header
+            while (stats.next() != "\n"):
+                pass
+            stats_reader = csv.reader(stats)
+            main_header = stats_reader.next()
+            sub_header = stats_reader.next()
+            time_row = sub_header.index("epoch")
+            int_counter = 0
+            for row in stats_reader:
+                time = float(row[time_row])
+                int_counter += 1
+                if time < start_time:
+                    continue
+                elif time > end_time:
+                    return stats_dict
+                if int_counter % interval != 0:
+                    continue
+                row_dict = dict()
+                for nextheader in main_header:
+                    if nextheader != "":
+                        row_dict[nextheader] = dict()
+                header = ""
+                for item_num, column in enumerate(row):
+                    if (len(main_header[item_num]) != 0):
+                        header = main_header[item_num]
+                    # all the stats are numbers, so we want to make sure that they stay that way in json
+                    row_dict[header][sub_header[item_num]] = float(column)
+                stats_dict[time] = row_dict
+        return stats_dict
+
+    def __calculate_average_stats(self, raw_stats):
+        '''
+        We have a large amount of raw data for the statistics that may be useful 
+        for the stats nerds, but most people care about a couple of numbers. For 
+        now, we're only going to supply:
+          * Average CPU
+          * Average Memory
+          * Total network use
+          * Total disk use
+        More may be added in the future. If they are, please update the above list.
+        
+        Note: raw_stats is directly from the __parse_stats method.
+        
+        Recall that this consists of a dictionary of timestamps, each of which
+        contains a dictionary of stat categories, each of which contains a
+        dictionary of stats.
+        '''
+        raw_stat_collection = dict()
+
+        for time_dict in raw_stats.values():
+            for main_header, sub_headers in time_dict.items():
+                item_to_append = None
+                if 'cpu' in main_header:
+                    # We want to take the idl stat and subtract it from 100
+                    # to get the time that the CPU is NOT idle.
+                    item_to_append = 100.0 - sub_headers['idl']
+                elif main_header == 'memory usage':
+                    item_to_append = sub_headers['used']
+                elif 'net' in main_header:
+                    # Network stats have two parts - receive and send. We'll use a tuple of
+                    # style (receive, send)
+                    item_to_append = (sub_headers['recv'], sub_headers['send'])
+                elif 'dsk' in main_header or 'io' in main_header:
+                    # Similar to network, except our tuple looks like (read, write)
+                    item_to_append = (sub_headers['read'], sub_headers['writ'])
+                if item_to_append is not None:
+                    if main_header not in raw_stat_collection:
+                        raw_stat_collection[main_header] = list()
+                    raw_stat_collection[main_header].append(item_to_append)
+
+        # Simple function to determine human readable size
+        # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
+        def sizeof_fmt(num):
+            # We'll assume that any number we get is convertible to a float, just in case
+            num = float(num)
+            for x in ['bytes', 'KB', 'MB', 'GB']:
+                if num < 1024.0 and num > -1024.0:
+                    return "%3.1f%s" % (num, x)
+                num /= 1024.0
+            return "%3.1f%s" % (num, 'TB')
+
+        # Now we have our raw stats in a readable format - we need to format it for display
+        # We need a floating point sum, so the built-in sum doesn't cut it
+        display_stat_collection = dict()
+        for header, values in raw_stat_collection.items():
+            display_stat = None
+            if 'cpu' in header:
+                display_stat = sizeof_fmt(math.fsum(values) / len(values))
+            elif header == 'memory usage':
+                display_stat = sizeof_fmt(math.fsum(values) / len(values))
+            elif 'net' in header:
+                receive, send = zip(*values)  # unzip
+                display_stat = {
+                    'receive': sizeof_fmt(math.fsum(receive)),
+                    'send': sizeof_fmt(math.fsum(send))
+                }
+            else:  # if 'dsk' or 'io' in header:
+                read, write = zip(*values)  # unzip
+                display_stat = {
+                    'read': sizeof_fmt(math.fsum(read)),
+                    'write': sizeof_fmt(math.fsum(write))
+                }
+            display_stat_collection[header] = display_stat
+        return display_stat_collection
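
For a concrete sense of the data shapes involved above: __parse_stats returns a dict of {timestamp: {category: {stat: float}}}, and __calculate_average_stats boils that down to a handful of display numbers. Below is a minimal, self-contained sketch of that reduction. The dstat header names ('total cpu usage', 'net/total', and so on) and the sample values are only illustrative assumptions; the nested shape is the part taken from the code.

    import math

    # Illustrative raw_stats, shaped like the output of __parse_stats:
    # {timestamp: {main header: {sub header: float}}}. Values are made up.
    raw_stats = {
        1514764800.0: {'total cpu usage': {'idl': 60.0},
                       'memory usage': {'used': 2.0e9},
                       'net/total': {'recv': 1.0e6, 'send': 2.0e5},
                       'dsk/total': {'read': 5.0e5, 'writ': 1.0e6}},
        1514764801.0: {'total cpu usage': {'idl': 40.0},
                       'memory usage': {'used': 2.2e9},
                       'net/total': {'recv': 1.5e6, 'send': 3.0e5},
                       'dsk/total': {'read': 6.0e5, 'writ': 1.2e6}},
    }

    # Average CPU busy time (100 - idle), average memory, and total network
    # receive, mirroring the summary categories listed in the docstring above.
    cpu_busy = [100.0 - t['total cpu usage']['idl'] for t in raw_stats.values()]
    mem_used = [t['memory usage']['used'] for t in raw_stats.values()]
    net_recv = [t['net/total']['recv'] for t in raw_stats.values()]

    print("Average CPU busy: %.1f%%" % (math.fsum(cpu_busy) / len(cpu_busy)))
    print("Average memory used: %.1f MB" % (math.fsum(mem_used) / len(mem_used) / 1048576))
    print("Total bytes received: %d" % int(math.fsum(net_recv)))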

+ 356 - 0
toolset/utils/scaffolding.py

@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+import os
+from shutil import copytree
+from toolset.utils.setup_util import replace_text
+from toolset.utils.metadata_helper import gather_frameworks, gather_langauges
+
+
+class Scaffolding:
+    def __init__(self):
+        print("""
+-------------------------------------------------------------------------------
+    This wizard is intended to help build the scaffolding required for a new 
+    test to be benchmarked.
+
+    From here, you will be prompted for values related to the test you
+    wish to add.
+-------------------------------------------------------------------------------"""
+              )
+
+        try:
+            self.__gather_display_name()
+            self.__gather_language()
+            self.__gather_approach()
+            self.__gather_classification()
+            self.__gather_orm()
+            self.__gather_webserver()
+            self.__gather_versus()
+            self.__confirm_values()
+        except (KeyboardInterrupt, EOFError):
+            print("")
+
+    def __gather_display_name(self):
+        print("""
+  The name of your test as you wish it to be displayed on the results page.
+
+  Example: Gemini, Gin, Express
+    """)
+        self.__prompt_display_name()
+        while not self.display_name:
+            self.__prompt_display_name()
+        self.name = self.display_name.lower()
+
+    def __prompt_display_name(self):
+        self.display_name = input("Name: ").strip()
+
+        found = False
+        for framework in gather_frameworks():
+            if framework.lower() == self.display_name.lower():
+                found = True
+
+        if found:
+            print("""
+  It appears that there is already a '%s' framework in the test suite. You will
+  have to pick a different name.
+      """ % self.display_name)
+            self.display_name = None
+
+    def __gather_language(self):
+        print("""
+  The language in which your test implementation is written.
+
+  Example: Java, Go, PHP
+    """)
+        self.language = None
+        while not self.language:
+            self.__prompt_language()
+
+    def __prompt_language(self):
+        self.language = input("Language: ").strip()
+
+        known_languages = gather_langauges()
+        language = None
+        for lang in known_languages:
+            if lang.lower() == self.language.lower():
+                language = lang
+
+        if not language:
+            similar = []
+            for lang in known_languages:
+                if lang.lower()[:1] == self.language.lower()[:1]:
+                    similar.append(lang)
+            similar = ', '.join(similar)
+
+            print("""
+  That language is not currently in our list of known languages.
+  
+  Here is a list of similar languages present in our benchmark suite that you
+  may have meant:
+
+  %s
+      
+  Did you mean to add the new language, '%s', to the benchmark suite?
+      """ % (similar, self.language))
+            valid = self.__prompt_confirm_new_language(known_languages)
+            while not valid:
+                valid = self.__prompt_confirm_new_language(known_languages)
+
+            if self.confirm_new_lang == 'n':
+                self.language = None
+            else:
+                self.language = self.language.title()
+
+        return self.language
+
+    def __prompt_confirm_new_language(self, known_languages):
+        self.confirm_new_lang = input("Create New Language '%s' (y/n): " %
+                                      self.language).strip().lower()
+        return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
+
+    def __gather_approach(self):
+        print("""
+  The approach of your test implementation.
+
+  1) Realistic: Uses the framework with most out-of-the-box functionality 
+                enabled. We consider this realistic because most applications 
+                built with the framework will leave these features enabled.
+  2) Stripped:  Removes or outright avoids implementing features that are
+                unnecessary for the particulars of the benchmark exercise. This
+                might illuminate the marginal improvement available in fine-
+                tuning a framework to your application's use-case.
+
+  Note: If you are unsure, then your approach is probably Realistic. The
+        Stripped approach is seldom used and will not have results displayed
+        by default on the results website.
+    """)
+        valid = self.__prompt_approach()
+        while not valid:
+            valid = self.__prompt_approach()
+
+    def __prompt_approach(self):
+        self.approach = input("Approach [1/2]: ").strip()
+        if self.approach == '1':
+            self.approach = 'Realistic'
+        if self.approach == '2':
+            self.approach = 'Stripped'
+        return self.approach == 'Realistic' or self.approach == 'Stripped'
+
+    def __gather_classification(self):
+        print("""
+  The classification of your test implementation.
+
+  1) Fullstack: Robust framework expected to provide high-level functionality 
+                for serving as a web application; for example, ability to 
+                compose views, provide functions for responding with several 
+                data types (json, html, etc), connecting to a database, form 
+                processing, etc.
+  2) Micro:     Simple framework expected to provide enough middleware to build
+                a robust web application such as request routing and some 
+                simple plumbing, but may not include built-in functionality 
+                such as, for example, server-composed views.
+  3) Platform:  Barebones infrastructure for servicing HTTP requests, but does
+                not include a framework at all.
+    """)
+        valid = self.__prompt_classification()
+        while not valid:
+            valid = self.__prompt_classification()
+        if self.classification == 'Platform':
+            self.platform = 'None'
+            self.framework = 'None'
+        else:
+            self.framework = self.display_name
+            self.__gather_platform()
+
+    def __prompt_classification(self):
+        self.classification = input("Classification [1/2/3]: ").strip()
+        if self.classification == '1':
+            self.classification = 'Fullstack'
+        if self.classification == '2':
+            self.classification = 'Micro'
+        if self.classification == '3':
+            self.classification = 'Platform'
+        return self.classification == 'Fullstack' or \
+               self.classification == 'Micro' or \
+               self.classification == 'Platform'
+
+    def __gather_platform(self):
+        print("""
+  The platform of your test implementation.
+
+  The platform is the low-level software or API used to host web applications 
+  for the framework; the platform provides an implementation of the HTTP
+  fundamentals.
+
+  Not all frameworks have a platform, and if your programming language provides
+  much of what we define as a platform, leave this blank.
+
+  Example: Servlet, Wai, .NET
+    """)
+        self.__prompt_platform()
+
+    def __prompt_platform(self):
+        self.platform = input("Platform (optional): ").strip()
+        if self.platform == '':
+            self.platform = 'None'
+
+    def __gather_orm(self):
+        print("""
+  How would you classify the ORM (object-relational mapper) of your test?
+
+  1) Full:  A feature-rich ORM which provides functionality for interacting 
+            with a database without writing queries in all but the most
+            extreme edge cases.
+  2) Micro: An ORM which provides functionality for interacting with a database
+            for many trivial operations (querying, updating), but not more 
+            robust cases (for example, gathering relations).
+  3) Raw:   No ORM; raw database access.
+    """)
+        valid = self.__prompt_orm()
+        while not valid:
+            valid = self.__prompt_orm()
+
+    def __prompt_orm(self):
+        self.orm = input("ORM [1/2/3]: ").strip()
+        if self.orm == '1':
+            self.orm = 'Full'
+        if self.orm == '2':
+            self.orm = 'Micro'
+        if self.orm == '3':
+            self.orm = 'Raw'
+        return self.orm == 'Full' or \
+               self.orm == 'Micro' or \
+               self.orm == 'Raw'
+
+    def __gather_webserver(self):
+        print("""
+  Name of the front-end webserver sitting in front of your test implementation.
+
+  Your test implementation may not use a separate webserver and may act as its
+  own; in that case, you can leave this blank.
+
+  Example: nginx, Meinheld, httplight
+    """)
+        self.__prompt_webserver()
+
+    def __prompt_webserver(self):
+        self.webserver = input("Webserver (optional): ").strip()
+        if self.webserver == '':
+            self.webserver = 'None'
+
+    def __gather_versus(self):
+        print("""
+  The name of another test (elsewhere in this project) that is a subset of this
+  framework.
+  This allows the results website to generate the framework efficiency chart.
+  For example, Compojure is compared to "servlet" since Compojure is built on
+  the Servlet platform.
+
+  Example: Servlet, Wai, Undertow
+    """)
+        self.__prompt_versus()
+
+    def __prompt_versus(self):
+        self.versus = input("Versus (optional): ").strip()
+        if self.versus == '':
+            self.versus = 'None'
+
+    def __confirm_values(self):
+        print("""
+    Name: %s
+    Language: %s
+    Approach: %s
+    Classification: %s
+    Platform: %s
+    ORM: %s
+    Webserver: %s
+    Versus: %s
+
+  Finalize the initialization of your test given the above values?
+
+  Note: once you have initialized your test, you can change these values later.
+    """ % (self.display_name, self.language, self.approach,
+           self.classification, self.platform, self.orm, self.webserver,
+           self.versus))
+
+        valid = self.__prompt_confirmation()
+        while not valid:
+            valid = self.__prompt_confirmation()
+
+        if self.confirmation == 'y':
+            self.__build_scaffolding()
+        else:
+            print('Aborting')
+
+    def __prompt_confirmation(self):
+        self.confirmation = input("Initialize [y/n]: ").strip().lower()
+        return self.confirmation == 'y' or self.confirmation == 'n'
+
+    def __build_scaffolding(self):
+        if self.__create_test_folder():
+            self.__copy_scaffold_files()
+            self.__edit_scaffold_files()
+            self.__print_success()
+
+    def __create_test_folder(self):
+        self.language_dir = os.path.join("frameworks", self.language)
+        self.test_dir = os.path.join(self.language_dir, self.name)
+
+        if os.path.exists(self.test_dir):
+            print("Test '%s' already exists; aborting." % self.name)
+            return False
+
+        return True
+
+    def __copy_scaffold_files(self):
+        self.scaffold_dir = os.path.join("toolset", "setup", "scaffolding")
+        copytree(self.scaffold_dir, self.test_dir)
+
+    def __edit_scaffold_files(self):
+        for file in os.listdir(os.path.join(self.test_dir)):
+            replace_text(
+                os.path.join(self.test_dir, file), "\$NAME", self.name)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$DISPLAY_NAME",
+                self.display_name)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$APPROACH", self.approach)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$CLASSIFICATION",
+                self.classification)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$FRAMEWORK",
+                self.framework)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
+            replace_text(os.path.join(self.test_dir, file), "\$ORM", self.orm)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$PLATFORM", self.platform)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$WEBSERVER",
+                self.webserver)
+            replace_text(
+                os.path.join(self.test_dir, file), "\$VERSUS", self.versus)
+
+    def __print_success(self):
+        print("""
+-------------------------------------------------------------------------------
+  Success!
+
+  Your new test structure has been built to the specifications of the suite.
+  Here is a brief run-down of what has been built:
+
+    frameworks
+        └─── %s
+              └─── %s
+                    ├─── .gitignore
+                    ├─── benchmark_config.json
+                    ├─── README.md
+                    ├─── setup.sh
+                    ├─── setup_mysql.sh
+                    └─── source_code
+
+  The next step is to read through your README.md and follow the instructions
+  provided therein.
+-------------------------------------------------------------------------------"""
+              % (self.language, self.name))
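
The __edit_scaffold_files step above amounts to regular-expression substitution over every copied template file, using the replace_text helper that appears later in this diff (setup_util.py). Here is a rough sketch of the same idea against an in-memory string rather than a file on disk; the JSON keys and values are only illustrative:

    import re

    # A made-up fragment of a scaffolded config file containing the
    # placeholders that __edit_scaffold_files fills in.
    template = '{"framework": "$NAME", "display_name": "$DISPLAY_NAME", "language": "$LANGUAGE"}'

    substitutions = {
        r'\$NAME': 'mytest',
        r'\$DISPLAY_NAME': 'MyTest',
        r'\$LANGUAGE': 'Python',
    }

    # replace_text performs the same re.sub, but reads and rewrites a file in place.
    for pattern, replacement in substitutions.items():
        template = re.sub(pattern, replacement, template)

    print(template)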

+ 47 - 40
toolset/setup/linux/setup_util.py → toolset/utils/setup_util.py

@@ -2,21 +2,22 @@ import re
 import os
 import sys
 import subprocess
-import platform
 
 from threading import Thread
 from Queue import Queue, Empty
 
+
 class NonBlockingStreamReader:
-  '''
+    '''
   Enables calling readline in a non-blocking manner with a blocking stream,
   such as the ones returned from subprocess.Popen
 
   Originally written by Eyal Arubas, who granted permission to use this inside TFB
   See http://eyalarubas.com/python-subproc-nonblock.html
   '''
-  def __init__(self, stream, eof_message = None):
-    '''
+
+    def __init__(self, stream, eof_message=None):
+        '''
     stream: the stream to read from.
             Usually a process' stdout or stderr.
     eof_message: A message to print to stdout as soon
@@ -24,38 +25,39 @@ class NonBlockingStreamReader:
       want to track the exact moment a stream terminates
     '''
 
-    self._s = stream
-    self._q = Queue()
-    self._eof_message = eof_message
-    self._poisonpill = 'MAGIC_POISONPILL_STRING'
-
-    def _populateQueue(stream, queue):
-      while True:
-        line = stream.readline()
-        if line: # 'data\n' or '\n'
-          queue.put(line)
-        else:    # '' e.g. EOF
-          if self._eof_message:
-            sys.stdout.write(self._eof_message + '\n')
-          queue.put(self._poisonpill)
-          return
-
-    self._t = Thread(target = _populateQueue,
-            args = (self._s, self._q))
-    self._t.daemon = True
-    self._t.start()
-
-  def readline(self, timeout = None):
-    try:
-      line = self._q.get(block = timeout is not None,
-        timeout = timeout)
-      if line == self._poisonpill: 
-        raise EndOfStream
-      return line
-    except Empty:
-      return None
-
-class EndOfStream(Exception): pass
+        self._s = stream
+        self._q = Queue()
+        self._eof_message = eof_message
+        self._poisonpill = 'MAGIC_POISONPILL_STRING'
+
+        def _populateQueue(stream, queue):
+            while True:
+                line = stream.readline()
+                if line:  # 'data\n' or '\n'
+                    queue.put(line)
+                else:  # '' e.g. EOF
+                    if self._eof_message:
+                        sys.stdout.write(self._eof_message + '\n')
+                    queue.put(self._poisonpill)
+                    return
+
+        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
+        self._t.daemon = True
+        self._t.start()
+
+    def readline(self, timeout=None):
+        try:
+            line = self._q.get(block=timeout is not None, timeout=timeout)
+            if line == self._poisonpill:
+                raise EndOfStream
+            return line
+        except Empty:
+            return None
+
+
+class EndOfStream(Exception):
+    pass
+
 
 
 # Replaces all text found using the regular expression to_replace with the supplied replacement.
 def replace_text(file, to_replace, replacement):
@@ -65,6 +67,7 @@ def replace_text(file, to_replace, replacement):
     with open(file, "w") as f:
         f.write(replaced_text)
 
+
 # Queries the shell for the value of FWROOT
 def get_fwroot():
 def get_fwroot():
 
 
@@ -73,12 +76,16 @@ def get_fwroot():
     else:
         return os.getcwd()
 
+
 # Turns absolute path into path relative to FWROOT
 # Assumes path is underneath FWROOT, not above
-# 
-# Useful for clean presentation of paths 
+#
+# Useful for clean presentation of paths
 # e.g. /foo/bar/benchmarks/go/install.sh
-# v.s. FWROOT/go/install.sh 
+# v.s. FWROOT/go/install.sh
 def path_relative_to_root(path):
     # Requires bash shell parameter expansion
-    return subprocess.check_output("D=%s && printf \"${D#%s}\""%(path, get_fwroot()), shell=True, executable='/bin/bash')
+    return subprocess.check_output(
+        "D=%s && printf \"${D#%s}\"" % (path, get_fwroot()),
+        shell=True,
+        executable='/bin/bash')
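
A short usage sketch for NonBlockingStreamReader: wrap a child process's stdout so the caller can poll for output with a timeout instead of blocking on readline. The child command is arbitrary, and the import path assumes the new toolset/utils/setup_util.py location introduced by this diff:

    import subprocess
    from toolset.utils.setup_util import NonBlockingStreamReader, EndOfStream

    # Any child process that produces output over a few seconds will do.
    proc = subprocess.Popen(['ping', '-c', '3', 'localhost'], stdout=subprocess.PIPE)
    reader = NonBlockingStreamReader(proc.stdout, eof_message='child process finished')

    while True:
        try:
            line = reader.readline(0.1)  # wait up to 100ms for a line
            if line:
                print(line.rstrip())
        except EndOfStream:
            break  # the child closed its stdout; the reader thread has exited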

+ 16 - 0
toolset/utils/unbuffered.py

@@ -0,0 +1,16 @@
+# Wrapper for unbuffered stream writing.
+# http://stackoverflow.com/a/107717/376366
+# Used to make sure print output appears in the correct order
+# in log files when spawning subprocesses.
+
+
+class Unbuffered:
+    def __init__(self, stream):
+        self.stream = stream
+
+    def write(self, data):
+        self.stream.write(data)
+        self.stream.flush()
+
+    def __getattr__(self, attr):
+        return getattr(self.stream, attr)

Some files were not shown because too many files changed in this diff