
TimeLogger class and other toolset changes (#3630)

Nate, 7 years ago
commit 772ca4cda4

+ 0 - 9
frameworks/JavaScript/express/benchmark_config.json

@@ -7,17 +7,12 @@
       "port": 8080,
       "approach": "Realistic",
       "classification": "Micro",
-      "database": "None",
       "framework": "express",
       "language": "JavaScript",
       "flavor": "NodeJS",
-      "orm": "Full",
-      "platform": "None",
-      "webserver": "None",
       "os": "Linux",
       "database_os": "Linux",
       "display_name": "express",
-      "notes": "",
       "versus": "nodejs"
     },
     "mongodb": {
@@ -33,8 +28,6 @@
       "language": "JavaScript",
       "flavor": "NodeJS",
       "orm": "Full",
-      "platform": "None",
-      "webserver": "None",
       "os": "Linux",
       "database_os": "Linux",
       "display_name": "express",
@@ -54,8 +47,6 @@
       "language": "JavaScript",
       "flavor": "NodeJS",
       "orm": "Full",
-      "platform": "None",
-      "webserver": "None",
       "os": "Linux",
       "database_os": "Linux",
       "display_name": "express",

+ 83 - 123
toolset/benchmark/benchmarker.py

@@ -1,13 +1,15 @@
 from toolset.utils.output_helper import log, FNULL
-from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
-from toolset.utils import docker_helper
+from toolset.utils.docker_helper import DockerHelper
+from toolset.utils.time_logger import TimeLogger
+from toolset.utils.metadata import Metadata
+from toolset.utils.results import Results
+from toolset.utils.audit import Audit

 import os
 import subprocess
 import traceback
-import socket
+import sys
 import time
-import json
 import shlex
 from pprint import pprint

@@ -15,12 +17,22 @@ from colorama import Fore


 class Benchmarker:
-    def __init__(self, config, results):
+    def __init__(self, config):
         '''
         Initialize the benchmarker.
         '''
         self.config = config
-        self.results = results
+        self.timeLogger = TimeLogger()
+        self.metadata = Metadata(self)
+        self.audit = Audit(self)
+
+        # a list of all tests for this run
+        self.tests = self.metadata.tests_to_run()
+
+        self.results = Results(self)
+        self.docker_helper = DockerHelper(self)
+
+

     ##########################################################################################
     # Public methods
@@ -34,19 +46,16 @@ class Benchmarker:
         running benchmarks against them.
         '''
         # Generate metadata
-        self.__run_list_test_metadata()
-
-        # Get a list of all known  tests that we can run.
-        all_tests = gather_remaining_tests(self.config, self.results)
+        self.metadata.list_test_metadata()

         any_failed = False
         # Run tests
         log("Running Tests...", border='=')
-        docker_helper.build_wrk(self.config)
+        self.docker_helper.build_wrk()

         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
-            for test in all_tests:
+            for test in self.tests:
                 log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
                     if not self.__run_test(test, benchmark_log):
@@ -57,7 +66,7 @@ class Benchmarker:
         # Parse results
         if self.config.mode == "benchmark":
             log("Parsing Results ...", border='=')
-            self.results.parse(all_tests)
+            self.results.parse(self.tests)

         self.results.set_completion_time()
         self.results.upload()
@@ -65,36 +74,23 @@ class Benchmarker:

         return any_failed

+    def stop(self, signal=None, frame=None):
+        log("Shutting down (may take a moment)")
+        self.docker_helper.stop(kill=True)
+        sys.exit(0)
+
     ##########################################################################################
     # Private methods
     ##########################################################################################

-    def __run_list_test_metadata(self):
-        '''
-        Prints the metadata for all the available tests
-        '''
-        all_tests = gather_tests(benchmarker_config=self.config)
-        all_tests_json = json.dumps(map(lambda test: {
-          "name": test.name,
-          "approach": test.approach,
-          "classification": test.classification,
-          "database": test.database,
-          "framework": test.framework,
-          "language": test.language,
-          "orm": test.orm,
-          "platform": test.platform,
-          "webserver": test.webserver,
-          "os": test.os,
-          "database_os": test.database_os,
-          "display_name": test.display_name,
-          "notes": test.notes,
-          "versus": test.versus
-        }, all_tests))
-
-        with open(
-                os.path.join(self.results.directory, "test_metadata.json"),
-                "w") as f:
-            f.write(all_tests_json)
+    def __exit_test(self, success, prefix, file, message=None):
+        if message:
+            log(message,
+                prefix=prefix,
+                file=file,
+                color=Fore.RED if success else '')
+        self.timeLogger.log_test_end(log_prefix=prefix, file=file)
+        return success

     def __run_test(self, test, benchmark_log):
         '''
@@ -102,78 +98,65 @@ class Benchmarker:
         optionally benchmarks the webapp, and ultimately stops all services
         started for this test.
         '''
+
         log_prefix = "%s: " % test.name
+        self.timeLogger.log_test_start()

         # If the test is in the excludes list, we skip it
-        if self.config.exclude != None and test.name in self.config.exclude:
+        if self.config.exclude and test.name in self.config.exclude:
             message = "Test {name} has been added to the excludes list. Skipping.".format(name=test.name)
             self.results.write_intermediate(test.name, message)
-            log(message,
+            return self.__exit_test(
+                success=False,
+                message=message,
                 prefix=log_prefix,
                 file=benchmark_log)
-            return False

         database_container = None
         try:
-            if self.__is_port_bound(test.port):
-                time.sleep(60)
-
-            if self.__is_port_bound(test.port):
-                # We gave it our all
-                message = "Error: Port %s is not available, cannot start %s" % (test.port, test.name)
-                self.results.write_intermediate(test.name, message)
-                log(message,
-                    prefix=log_prefix,
-                    file=benchmark_log,
-                    color=Fore.RED)
-                return False
-
             # Start database container
             if test.database.lower() != "none":
-                database_container = docker_helper.start_database(
-                    self.config, test.database.lower())
+                database_container = self.docker_helper.start_database(
+                    test.database.lower())
                 if database_container is None:
                     message = "ERROR: Problem building/running database container"
-                    self.results.write_intermediate(test.name, message)
-                    log(message,
+                    return self.__exit_test(
+                        success=False,
+                        message=message,
                         prefix=log_prefix,
-                        file=benchmark_log,
-                        color=Fore.RED)
-                    return False
+                        file=benchmark_log)

             # Start webapp
             container = test.start()
             if container is None:
-                docker_helper.stop(self.config, container, database_container,
-                                   test)
+                self.docker_helper.stop([container, database_container])
                 message = "ERROR: Problem starting {name}".format(name=test.name)
                 self.results.write_intermediate(test.name, message)
-                log(message,
+                return self.__exit_test(
+                    success=False,
+                    message=message,
                     prefix=log_prefix,
-                    file=benchmark_log,
-                    color=Fore.RED)
-                return False
+                    file=benchmark_log)

             slept = 0
             max_sleep = 60
             accepting_requests = False
             while not accepting_requests and slept < max_sleep:
-                if not docker_helper.server_container_exists(self.config, container.id):
+                if not self.docker_helper.server_container_exists(container.id):
                     break
                 accepting_requests = test.is_accepting_requests()
                 time.sleep(1)
                 slept += 1

             if not accepting_requests:
-                docker_helper.stop(self.config, container, database_container,
-                                   test)
+                self.docker_helper.stop([container, database_container])
                 message = "ERROR: Framework is not accepting requests from client machine"
                 self.results.write_intermediate(test.name, message)
-                log(message,
+                return self.__exit_test(
+                    success=False,
+                    message=message,
                     prefix=log_prefix,
-                    file=benchmark_log,
-                    color=Fore.RED)
-                return False
+                    file=benchmark_log)

             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
@@ -184,20 +167,28 @@ class Benchmarker:
                 while True:
                     time.sleep(1)

-            # Verify URLs
+            # Verify URLs and audit
             log("Verifying framework URLs", prefix=log_prefix)
+            self.timeLogger.log_verify_start()
             passed_verify = test.verify_urls()
+            self.audit.audit_test_dir(test.directory)
+            self.timeLogger.log_verify_end(
+                log_prefix=log_prefix,
+                file=benchmark_log)

             # Benchmark this test
             if self.config.mode == "benchmark":
                 log("Benchmarking %s" % test.name,
                     file=benchmark_log,
                     border='-')
+                self.timeLogger.log_benchmarking_start()
                 self.__benchmark(test, benchmark_log)
+                self.timeLogger.log_benchmarking_end(
+                    log_prefix=log_prefix,
+                    file=benchmark_log)

             # Stop this test
-            docker_helper.stop(self.config, container, database_container,
-                               test)
+            self.docker_helper.stop([container, database_container])

             # Save results thus far into the latest results directory
             self.results.write_intermediate(test.name,
@@ -209,23 +200,26 @@ class Benchmarker:
             self.results.upload()

             if self.config.mode == "verify" and not passed_verify:
-                log("Failed verify!",
+                return self.__exit_test(
+                    success=False,
+                    message="Failed verify!",
                     prefix=log_prefix,
-                    file=benchmark_log,
-                    color=Fore.RED)
-                return False
+                    file=benchmark_log)
         except Exception as e:
             tb = traceback.format_exc()
             self.results.write_intermediate(test.name,
                                             "error during test: " + str(e))
-            log("Error during test: %s" % test.name,
-                file=benchmark_log,
-                border='-',
-                color=Fore.RED)
             log(tb, prefix=log_prefix, file=benchmark_log)
-            return False
+            return self.__exit_test(
+                success=False,
+                message="Error during test: %s" % test.name,
+                prefix=log_prefix,
+                file=benchmark_log)

-        return True
+        return self.__exit_test(
+            success=True,
+            prefix=log_prefix,
+            file=benchmark_log)

     def __benchmark(self, framework_test, benchmark_log):
         '''
@@ -236,7 +230,6 @@ class Benchmarker:
             log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

             test = framework_test.runTests[test_type]
-            test.setup_out(benchmark_log)
             raw_file = self.results.get_raw_file(framework_test.name,
                                                  test_type)
             if not os.path.exists(raw_file):
@@ -255,8 +248,7 @@ class Benchmarker:
                                                        framework_test.port,
                                                        test.get_url()))

-                docker_helper.benchmark(self.config, script, script_variables,
-                                        raw_file)
+                self.docker_helper.benchmark(script, script_variables, raw_file)

                 # End resource usage metrics collection
                 self.__end_logging()
@@ -296,35 +288,3 @@ class Benchmarker:
         '''
         self.subprocess_handle.terminate()
         self.subprocess_handle.communicate()
-
-    def __is_port_bound(self, port):
-        '''
-        Check if the requested port is available. If it isn't available, then a
-        previous test probably didn't shutdown properly.
-        '''
-        port = int(port)
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        try:
-            # Try to bind to all IP addresses, this port
-            s.bind(("", port))
-            # If we get here, we were able to bind successfully,
-            # which means the port is free.
-        except socket.error:
-            # If we get an exception, it might be because the port is still bound
-            # which would be bad, or maybe it is a privileged port (<1024) and we
-            # are not running as root, or maybe the server is gone, but sockets are
-            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
-            # connect.
-            try:
-                s.connect(("127.0.0.1", port))
-                # If we get here, we were able to connect to something, which means
-                # that the port is still bound.
-                return True
-            except socket.error:
-                # An exception means that we couldn't connect, so a server probably
-                # isn't still running on the port.
-                pass
-        finally:
-            s.close()
-
-        return False

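Note on the new TimeLogger: toolset/utils/time_logger.py itself is not included in this excerpt, so only its call sites are visible above (paired log_*_start()/log_*_end() calls around the build, verify, benchmark, and whole-test phases). As a rough, illustrative sketch only — method names taken from the call sites, bodies and message wording assumed — the class could look something like this:

    import time

    from toolset.utils.output_helper import log


    class TimeLogger:
        '''Sketch: tracks wall-clock time spent in each phase of a test run.'''

        def __init__(self):
            self.build_start = 0
            self.test_start = 0
            self.verify_start = 0
            self.benchmarking_start = 0

        @staticmethod
        def output(elapsed, message, log_prefix, file):
            # Elapsed seconds are written through the toolset's existing log helper
            log("%s: %ss" % (message, int(elapsed)), prefix=log_prefix, file=file)

        def log_build_start(self):
            self.build_start = time.time()

        def log_build_end(self, log_prefix, file):
            self.output(time.time() - self.build_start, "Build time", log_prefix, file)

        def log_test_start(self):
            self.test_start = time.time()

        def log_test_end(self, log_prefix, file):
            self.output(time.time() - self.test_start, "Total test time", log_prefix, file)

        def log_verify_start(self):
            self.verify_start = time.time()

        def log_verify_end(self, log_prefix, file):
            self.output(time.time() - self.verify_start, "Verify time", log_prefix, file)

        def log_benchmarking_start(self):
            self.benchmarking_start = time.time()

        def log_benchmarking_end(self, log_prefix, file):
            self.output(time.time() - self.benchmarking_start, "Benchmarking time", log_prefix, file)

Each *_end() call receives the same log_prefix and file arguments that benchmarker.py already passes to log(), so the timing lines land in the per-test benchmark.log.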
+ 18 - 22
toolset/benchmark/framework_test.py

@@ -3,22 +3,20 @@ import traceback
 from requests import ConnectionError

 from toolset.utils.output_helper import log
-from toolset.utils import docker_helper

 # Cross-platform colored text
 from colorama import Fore, Style


 class FrameworkTest:
-    def __init__(self, name, directory, benchmarker_config, results, runTests,
+    def __init__(self, name, directory, benchmarker, runTests,
                  args):
         '''
         Constructor
         '''
         self.name = name
         self.directory = directory
-        self.benchmarker_config = benchmarker_config
-        self.results = results
+        self.benchmarker = benchmarker
         self.runTests = runTests
         self.approach = ""
         self.classification = ""
@@ -45,7 +43,7 @@ class FrameworkTest:
         '''
         Start the test implementation
         '''
-        test_log_dir = os.path.join(self.results.directory, self.name.lower())
+        test_log_dir = os.path.join(self.benchmarker.results.directory, self.name.lower())
         build_log_dir = os.path.join(test_log_dir, 'build')
         run_log_dir = os.path.join(test_log_dir, 'run')

@@ -58,16 +56,15 @@ class FrameworkTest:
         except OSError:
             pass

-        result = docker_helper.build(self.benchmarker_config, [self.name],
-                                     build_log_dir)
+        result = self.benchmarker.docker_helper.build(self, build_log_dir)
         if result != 0:
             return None

-        return docker_helper.run(self.benchmarker_config, self, run_log_dir)
+        return self.benchmarker.docker_helper.run(self, run_log_dir)

     def is_accepting_requests(self):
         '''
-        Determines whether this test implementation is up and accepting 
+        Determines whether this test implementation is up and accepting
         requests.
         '''
         test_type = None
@@ -75,21 +72,20 @@ class FrameworkTest:
             test_type = any_type
             break

-        url = "http://%s:%s%s" % (self.benchmarker_config.server_host,
+        url = "http://%s:%s%s" % (self.benchmarker.config.server_host,
                                   self.port,
                                   self.runTests[test_type].get_url())

-        return docker_helper.test_client_connection(self.benchmarker_config,
-                                                    url)
+        return self.benchmarker.docker_helper.test_client_connection(url)

     def verify_urls(self):
         '''
-        Verifys each of the URLs for this test. This will simply curl the URL and 
-        check for it's return status. For each url, a flag will be set on this 
+        Verifys each of the URLs for this test. This will simply curl the URL and
+        check for it's return status. For each url, a flag will be set on this
         object for whether or not it passed.
         Returns True if all verifications succeeded
         '''
-        log_path = os.path.join(self.results.directory, self.name.lower())
+        log_path = os.path.join(self.benchmarker.results.directory, self.name.lower())
         result = True

         def verify_type(test_type):
@@ -107,7 +103,7 @@ class FrameworkTest:
                     color=Fore.WHITE + Style.BRIGHT)

                 base_url = "http://%s:%s" % (
-                    self.benchmarker_config.server_host, self.port)
+                    self.benchmarker.config.server_host, self.port)

                 try:
                     # Verifies headers from the server. This check is made from the
@@ -120,8 +116,8 @@ class FrameworkTest:
                     # we're already failing
                     if not any(result == 'fail'
                                for (result, reason, url) in results):
-                        docker_helper.test_client_connection(
-                            self.benchmarker_config, base_url + test.get_url())
+                        self.benchmarker.docker_helper.test_client_connection(
+                            base_url + test.get_url())
                 except ConnectionError as e:
                     results = [('fail', "Server did not respond to request",
                                 base_url)]
@@ -167,11 +163,11 @@ class FrameworkTest:
                 [output_result(r1, r2, url) for (r1, r2, url) in results]

                 if test.failed:
-                    self.results.report_verify_results(self, test_type, 'fail')
+                    self.benchmarker.results.report_verify_results(self, test_type, 'fail')
                 elif test.warned:
-                    self.results.report_verify_results(self, test_type, 'warn')
+                    self.benchmarker.results.report_verify_results(self, test_type, 'warn')
                 elif test.passed:
-                    self.results.report_verify_results(self, test_type, 'pass')
+                    self.benchmarker.results.report_verify_results(self, test_type, 'pass')
                 else:
                     raise Exception(
                         "Unknown error - test did not pass,warn,or fail")
@@ -182,4 +178,4 @@ class FrameworkTest:
             if self.runTests[test_type].failed:
                 result = False

-        return result
+        return result

+ 3 - 3
toolset/benchmark/test_types/fortune_type.py

@@ -19,8 +19,8 @@ class FortuneTestType(FrameworkTestType):

     def verify(self, base_url):
         '''
-        Parses the given HTML string and asks the 
-        FortuneHTMLParser whether the parsed string is a 
+        Parses the given HTML string and asks the
+        FortuneHTMLParser whether the parsed string is a
         valid fortune response
         '''

@@ -34,7 +34,7 @@ class FortuneTestType(FrameworkTestType):

         parser = FortuneHTMLParser()
         parser.feed(body)
-        (valid, diff) = parser.isValidFortune(self.name, self.out)
+        (valid, diff) = parser.isValidFortune(self.name, body)

         if valid:
             problems += verify_headers(headers, url, should_be='html')

+ 10 - 24
toolset/benchmark/test_types/framework_test_type.py

@@ -13,13 +13,13 @@ from toolset.utils.output_helper import log

 class FrameworkTestType:
     '''
-    Interface between a test type (json, query, plaintext, etc) and 
+    Interface between a test type (json, query, plaintext, etc) and
     the rest of TFB. A test type defines a number of keys it expects
     to find in the benchmark_config.json, and this base class handles extracting
-    those keys and injecting them into the test. For example, if 
+    those keys and injecting them into the test. For example, if
     benchmark_config.json contains a line `"spam" : "foobar"` and a subclasses X
-    passes an argument list of ['spam'], then after parsing there will 
-    exist a member `X.spam = 'foobar'`. 
+    passes an argument list of ['spam'], then after parsing there will
+    exist a member `X.spam = 'foobar'`.
     '''

     def __init__(self,
@@ -32,8 +32,6 @@ class FrameworkTestType:
         self.name = name
         self.requires_db = requires_db
         self.args = args
-        self.out = sys.stdout
-        self.err = sys.stderr

         if accept_header is None:
             self.accept_header = self.accept('json')
@@ -54,21 +52,9 @@ class FrameworkTestType:
             'text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7'
         }[content_type]

-    def setup_out(self, out):
-        '''
-        Sets up file-like objects for logging. Used in 
-        cases where it is hard just return the output. Any
-        output sent to these file objects is also printed to 
-        the console
-
-        NOTE: I detest this. It would be much better to use
-        logging like it's intended
-        '''
-        self.out = out
-
     def parse(self, test_keys):
         '''
-        Takes the dict of key/value pairs describing a FrameworkTest 
+        Takes the dict of key/value pairs describing a FrameworkTest
         and collects all variables needed by this FrameworkTestType

         Raises AttributeError if required keys are missing
@@ -99,20 +85,20 @@ class FrameworkTestType:

     def verify(self, base_url):
         '''
-        Accesses URL used by this test type and checks the return 
+        Accesses URL used by this test type and checks the return
         values for correctness. Most test types run multiple checks,
         so this returns a list of results. Each result is a 3-tuple
         of (String result, String reason, String urlTested).

         - result : 'pass','warn','fail'
-        - reason : Short human-readable reason if result was 
-            warn or fail. Please do not print the response as part of this, 
-            other parts of TFB will do that based upon the current logging 
+        - reason : Short human-readable reason if result was
+            warn or fail. Please do not print the response as part of this,
+            other parts of TFB will do that based upon the current logging
             settings if this method indicates a failure happened
         - urlTested: The exact URL that was queried

         Subclasses should make a best-effort attempt to report as many
-        failures and warnings as they can to help users avoid needing 
+        failures and warnings as they can to help users avoid needing
         to run TFB repeatedly while debugging
         '''
         # TODO make String result into an enum to enforce

+ 50 - 52
toolset/run-tests.py

@@ -2,22 +2,18 @@ import argparse
 import socket
 import sys
 import signal
+import traceback
 from toolset.benchmark.benchmarker import Benchmarker
 from toolset.utils.scaffolding import Scaffolding
+from toolset.utils.audit import Audit
 from toolset.utils import cleaner
-from toolset.utils.results_helper import Results
 from toolset.utils.benchmark_config import BenchmarkConfig
-from toolset.utils import docker_helper
-from toolset.utils.metadata_helper import gather_tests
 from toolset.utils.output_helper import log

 # Enable cross-platform colored output
 from colorama import init, Fore
 init()

-# Required to be globally known
-config = None
-

 class StoreSeqAction(argparse.Action):
     '''
@@ -46,16 +42,6 @@ class StoreSeqAction(argparse.Action):
         return [abs(int(item)) for item in result]


-def __stop(signal, frame):
-    log("Shutting down (may take a moment)")
-    docker_helper.stop(config)
-    sys.exit(0)
-
-
-signal.signal(signal.SIGTERM, __stop)
-signal.signal(signal.SIGINT, __stop)
-
-
 ###################################################################################################
 # Main
 ###################################################################################################
@@ -83,9 +69,10 @@ def main(argv=None):

     # Suite options
     parser.add_argument(
-        '--build',
-        nargs='+',
-        help='Builds the dockerfile(s) for the given test(s)')
+        '--audit',
+        action='store_true',
+        default=False,
+        help='Audits framework tests for inconsistencies')
     parser.add_argument(
         '--clean',
         action='store_true',
@@ -204,40 +191,51 @@ def main(argv=None):

     args = parser.parse_args()

-    global config
     config = BenchmarkConfig(args)
-    results = Results(config)
-
-    if config.new:
-        Scaffolding(config)
-
-    elif config.build:
-        docker_helper.build(config, config.build)
-
-    elif config.clean:
-        cleaner.clean(results)
-        docker_helper.clean(config)
-
-    elif config.list_tests:
-        all_tests = gather_tests(benchmarker_config=config)
-
-        for test in all_tests:
-            log(test.name)
-
-    elif config.parse != None:
-        # TODO: broken
-        all_tests = gather_tests(benchmarker_config=config)
-
-        for test in all_tests:
-            test.parse_all()
-
-        results.parse(all_tests)
-
-    else:
-        benchmarker = Benchmarker(config, results)
-        any_failed = benchmarker.run()
-        if config.mode == "verify":
-            return any_failed
+    benchmarker = Benchmarker(config)
+
+    signal.signal(signal.SIGTERM, benchmarker.stop)
+    signal.signal(signal.SIGINT, benchmarker.stop)
+
+    try:
+        if config.new:
+            Scaffolding(benchmarker)
+
+        elif config.audit:
+            Audit(benchmarker).start_audit()
+
+        elif config.clean:
+            cleaner.clean(benchmarker.results)
+            benchmarker.docker_helper.clean()
+
+        elif config.list_tests:
+            all_tests = benchmarker.metadata.gather_tests()
+
+            for test in all_tests:
+                log(test.name)
+
+        elif config.parse:
+            all_tests = benchmarker.metadata.gather_tests()
+
+            for test in all_tests:
+                test.parse_all()
+
+            benchmarker.results.parse(all_tests)
+
+        else:
+            any_failed = benchmarker.run()
+            if config.mode == "verify":
+                return any_failed
+    except Exception:
+        tb = traceback.format_exc()
+        log("A fatal error has occurred",
+            color=Fore.RED)
+        log(tb)
+        # try one last time to stop docker containers on fatal error
+        try:
+            benchmarker.stop()
+        except:
+            sys.exit(1)

     return 0
 

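Condensed, the refactored entry point now follows one flow: the parsed arguments build a BenchmarkConfig, the config builds a Benchmarker, and SIGINT/SIGTERM are routed to benchmarker.stop() instead of the removed module-level __stop(). A minimal sketch of that wiring (run_suite is a hypothetical helper; argument parsing and the per-mode branches from main() above are omitted):

    import signal

    from toolset.benchmark.benchmarker import Benchmarker
    from toolset.utils.benchmark_config import BenchmarkConfig


    def run_suite(args):
        # args is an argparse.Namespace produced by the parser in main()
        config = BenchmarkConfig(args)
        benchmarker = Benchmarker(config)

        # Container cleanup now lives on the Benchmarker, so SIGINT/SIGTERM
        # go straight to benchmarker.stop() rather than a global handler
        signal.signal(signal.SIGTERM, benchmarker.stop)
        signal.signal(signal.SIGINT, benchmarker.stop)

        return benchmarker.run()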
+ 2 - 2
toolset/scaffolding/benchmark_config.json

@@ -8,7 +8,7 @@
         "port": 8080,
         "approach": "$APPROACH",
         "classification": "$CLASSIFICATION",
-        "database": "None",
+        "database": "$DATABASE",
         "framework": "$FRAMEWORK",
         "language": "$LANGUAGE",
         "flavor": "None",
@@ -23,4 +23,4 @@
       }
     }
   ]
-}
+}

+ 34 - 0
toolset/utils/audit.py

@@ -0,0 +1,34 @@
+import os
+from toolset.utils.output_helper import log
+from colorama import Fore
+
+
+class Audit:
+    '''
+    Audits frameworks for inconsistencies
+    '''
+
+    def __init__(self, benchmarker):
+        self.benchmarker = benchmarker
+
+    def start_audit(self):
+        for lang in self.benchmarker.metadata.gather_languages():
+            for test_dir in self.benchmarker.metadata.gather_language_tests(lang):
+                self.audit_test_dir(test_dir)
+
+    def audit_test_dir(self, test_dir):
+        warnings = 0
+        log('Auditing %s:' % test_dir, color=Fore.BLUE)
+
+        if not self.benchmarker.metadata.has_file(test_dir, 'source_code'):
+            log('source_code file is missing')
+            warnings += 1
+
+        if not self.benchmarker.metadata.has_file(test_dir, 'README.md'):
+            log('README.md file is missing')
+            warnings += 1
+
+        if warnings:
+            log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
+        else:
+            log('No problems to report', color=Fore.GREEN)

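Audit depends on three Metadata helpers (gather_languages, gather_language_tests, has_file) whose implementations are not part of this excerpt. Assuming the directory layout added to BenchmarkConfig in the next file (lang_root pointing at frameworks/), they could plausibly be simple filesystem walks along these lines — an illustrative sketch, not the actual toolset/utils/metadata.py:

    import os


    class MetadataSketch:
        '''Illustrative stand-in for toolset/utils/metadata.py (not in this diff).'''

        def __init__(self, lang_root):
            # lang_root would correspond to config.lang_root, i.e. <FWROOT>/frameworks
            self.lang_root = lang_root

        def gather_languages(self):
            # Every directory directly under frameworks/ is a language
            return sorted(
                d for d in os.listdir(self.lang_root)
                if os.path.isdir(os.path.join(self.lang_root, d)))

        def gather_language_tests(self, language):
            # A test directory is any subdirectory of a language that
            # carries a benchmark_config.json
            lang_dir = os.path.join(self.lang_root, language)
            return [
                os.path.join(lang_dir, d) for d in os.listdir(lang_dir)
                if os.path.isfile(
                    os.path.join(lang_dir, d, "benchmark_config.json"))
            ]

        @staticmethod
        def has_file(test_dir, filename):
            # Audit.audit_test_dir() only needs a simple existence check
            return os.path.isfile(os.path.join(test_dir, filename))

Audit.start_audit() above then just iterates gather_languages() and gather_language_tests() and calls audit_test_dir() on each test directory.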
+ 8 - 3
toolset/utils/benchmark_config.py

@@ -29,11 +29,11 @@ class BenchmarkConfig:

         self.duration = args.duration
         self.exclude = args.exclude
-        self.build = args.build
         self.quiet = args.quiet
         self.server_host = args.server_host
         self.database_host = args.database_host
         self.client_host = args.client_host
+        self.audit = args.audit
         self.new = args.new
         self.clean = args.clean
         self.mode = args.mode
@@ -74,8 +74,13 @@ class BenchmarkConfig:

         self.start_time = time.time()

-        # Remember root directory
-        self.fwroot = os.getenv('FWROOT')
+        # Remember directories
+        self.fw_root = os.getenv('FWROOT')
+        self.db_root = os.path.join(self.fw_root, "toolset", "databases")
+        self.lang_root = os.path.join(self.fw_root, "frameworks")
+        self.results_root = os.path.join(self.fw_root, "results")
+        self.wrk_root = os.path.join(self.fw_root, "toolset", "wrk")
+        self.scaffold_root = os.path.join(self.fw_root, "toolset", "scaffolding")

         if hasattr(self, 'parse') and self.parse != None:
             self.timestamp = self.parse

+ 305 - 333
toolset/utils/docker_helper.py

@@ -1,6 +1,5 @@
 import os
 import socket
-import fnmatch
 import json
 import docker
 import time
@@ -10,98 +9,104 @@ from threading import Thread
 from colorama import Fore, Style

 from toolset.utils.output_helper import log
-from toolset.utils.metadata_helper import gather_tests
 from toolset.utils.database_helper import test_database


-def clean(benchmarker_config):
-    '''
-    Cleans all the docker images from the system
-    '''
-    # Clean the app server images
-    client = docker.DockerClient(
-        base_url=benchmarker_config.server_docker_host)
-
-    client.images.prune()
-    for image in client.images.list():
-        if len(image.tags) > 0:
-            # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
-            image_tag = image.tags[0].split(':')[0]
-            if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
-                client.images.remove(image.id, force=True)
-    client.images.prune()
-
-    # Clean the database server images
-    client = docker.DockerClient(
-        base_url=benchmarker_config.database_docker_host)
-
-    client.images.prune()
-    for image in client.images.list():
-        if len(image.tags) > 0:
-            # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
-            image_tag = image.tags[0].split(':')[0]
-            if image_tag != 'techempower/tfb':
-                client.images.remove(image.id, force=True)
-    client.images.prune()
-
-
-def __build(base_url, path, build_log_file, log_prefix, dockerfile, tag):
-    '''
-    Builds docker containers using docker-py low-level api
-    '''
-
-    with open(build_log_file, 'w') as build_log:
-        try:
-            client = docker.APIClient(base_url=base_url)
-            output = client.build(
-                path=path,
-                dockerfile=dockerfile,
-                tag=tag,
-                forcerm=True,
-                pull=True)
-            buffer = ""
-            for token in output:
-                if token.startswith('{"stream":'):
-                    token = json.loads(token)
-                    token = token[token.keys()[0]].encode('utf-8')
-                    buffer += token
-                elif token.startswith('{"errorDetail":'):
-                    token = json.loads(token)
-                    raise Exception(token['errorDetail']['message'])
-                while "\n" in buffer:
-                    index = buffer.index("\n")
-                    line = buffer[:index]
-                    buffer = buffer[index + 1:]
-                    log(line,
+class DockerHelper:
+
+    def __init__(self, benchmarker = None):
+        self.benchmarker = benchmarker
+
+        self.client = docker.DockerClient(
+            base_url=self.benchmarker.config.client_docker_host)
+        self.server = docker.DockerClient(
+            base_url=self.benchmarker.config.server_docker_host)
+        self.database = docker.DockerClient(
+            base_url=self.benchmarker.config.database_docker_host)
+
+    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile, tag):
+        '''
+        Builds docker containers using docker-py low-level api
+        '''
+
+        self.benchmarker.timeLogger.log_build_start()
+        with open(build_log_file, 'w') as build_log:
+            try:
+                client = docker.APIClient(base_url=base_url)
+                output = client.build(
+                    path=path,
+                    dockerfile=dockerfile,
+                    tag=tag,
+                    forcerm=True,
+                    pull=True)
+                buffer = ""
+                for token in output:
+                    if token.startswith('{"stream":'):
+                        token = json.loads(token)
+                        token = token[token.keys()[0]].encode('utf-8')
+                        buffer += token
+                    elif token.startswith('{"errorDetail":'):
+                        token = json.loads(token)
+                        raise Exception(token['errorDetail']['message'])
+                    while "\n" in buffer:
+                        index = buffer.index("\n")
+                        line = buffer[:index]
+                        buffer = buffer[index + 1:]
+                        log(line,
+                            prefix=log_prefix,
+                            file=build_log,
+                            color=Fore.WHITE + Style.BRIGHT \
+                                if re.match(r'^Step \d+\/\d+', line) else '')
+
+                if buffer:
+                    log(buffer,
                         prefix=log_prefix,
                         file=build_log,
                         color=Fore.WHITE + Style.BRIGHT \
-                            if re.match(r'^Step \d+\/\d+', line) else '')
-
-            if buffer:
-                log(buffer,
+                            if re.match(r'^Step \d+\/\d+', buffer) else '')
+            except Exception:
+                tb = traceback.format_exc()
+                log("Docker build failed; terminating",
                     prefix=log_prefix,
                     file=build_log,
-                    color=Fore.WHITE + Style.BRIGHT \
-                        if re.match(r'^Step \d+\/\d+', buffer) else '')
-        except Exception:
-            tb = traceback.format_exc()
-            log("Docker build failed; terminating",
-                prefix=log_prefix,
-                file=build_log,
-                color=Fore.RED)
-            log(tb, prefix=log_prefix, file=build_log)
-            raise
-
-
-def build(benchmarker_config, test_names, build_log_dir=os.devnull):
-    '''
-    Builds the test docker containers
-    '''
-    tests = gather_tests(
-        include=test_names, benchmarker_config=benchmarker_config)
-
-    for test in tests:
+                    color=Fore.RED)
+                log(tb, prefix=log_prefix, file=build_log)
+                self.benchmarker.timeLogger.log_build_end(
+                    log_prefix=log_prefix,
+                    file=build_log)
+                raise
+
+            self.benchmarker.timeLogger.log_build_end(
+                log_prefix=log_prefix,
+                file=build_log)
+
+    def clean(self):
+        '''
+        Cleans all the docker images from the system
+        '''
+
+        self.server.images.prune()
+        for image in self.server.images.list():
+            if len(image.tags) > 0:
+                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
+                image_tag = image.tags[0].split(':')[0]
+                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
+                    self.server.images.remove(image.id, force=True)
+        self.server.images.prune()
+
+        self.database.images.prune()
+        for image in self.database.images.list():
+            if len(image.tags) > 0:
+                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
+                image_tag = image.tags[0].split(':')[0]
+                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
+                    self.database.images.remove(image.id, force=True)
+        self.database.images.prune()
+
+    def build(self, test, build_log_dir=os.devnull):
+        '''
+        Builds the test docker containers
+        '''
         log_prefix = "%s: " % test.name

         # Build the test image
@@ -113,8 +118,8 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
                 "%s.log" % test_docker_file.replace(".dockerfile", "").lower())

         try:
-            __build(
-                base_url=benchmarker_config.server_docker_host,
+            self.__build(
+                base_url=self.benchmarker.config.server_docker_host,
                 build_log_file=build_log_file,
                 log_prefix=log_prefix,
                 path=test.directory,
@@ -124,272 +129,239 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
         except Exception:
             return 1

-    return 0
+        return 0

+    def run(self, test, run_log_dir):
+        '''
+        Run the given Docker container(s)
+        '''

-def run(benchmarker_config, test, run_log_dir):
-    '''
-    Run the given Docker container(s)
-    '''
-    client = docker.DockerClient(
-        base_url=benchmarker_config.server_docker_host)
+        log_prefix = "%s: " % test.name
+        container = None

-    log_prefix = "%s: " % test.name
-    container = None
+        try:

-    try:
+            def watch_container(docker_container, docker_file):
+                with open(
+                        os.path.join(run_log_dir, "%s.log" % docker_file.replace(
+                            ".dockerfile", "").lower()), 'w') as run_log:
+                    for line in docker_container.logs(stream=True):
+                        log(line, prefix=log_prefix, file=run_log)
+
+            extra_hosts = None
+            name = "tfb-server"
+
+            if self.benchmarker.config.network is None:
+                extra_hosts = {
+                    socket.gethostname(): str(self.benchmarker.config.server_host),
+                    'tfb-server': str(self.benchmarker.config.server_host),
+                    'tfb-database': str(self.benchmarker.config.database_host)
+                }
+                name = None
+
+            sysctl = {'net.core.somaxconn': 65535}
+
+            ulimit = [{
+                'name': 'nofile',
+                'hard': 200000,
+                'soft': 200000
+            }, {
+                'name': 'rtprio',
+                'hard': 99,
+                'soft': 99
+            }]
+
+            container = self.server.containers.run(
+                "techempower/tfb.test.%s" % test.name,
+                name=name,
+                network=self.benchmarker.config.network,
+                network_mode=self.benchmarker.config.network_mode,
+                stderr=True,
+                detach=True,
+                init=True,
+                extra_hosts=extra_hosts,
+                privileged=True,
+                ulimits=ulimit,
+                sysctls=sysctl,
+                remove=True,
+                log_config={'type': None})
+
+            watch_thread = Thread(
+                target=watch_container,
+                args=(
+                    container,
+                    "%s.dockerfile" % test.name,
+                ))
+            watch_thread.daemon = True
+            watch_thread.start()

-        def watch_container(docker_container, docker_file):
+        except Exception:
             with open(
-                    os.path.join(run_log_dir, "%s.log" % docker_file.replace(
-                        ".dockerfile", "").lower()), 'w') as run_log:
-                for line in docker_container.logs(stream=True):
-                    log(line, prefix=log_prefix, file=run_log)
-
-        extra_hosts = None
-        name = "tfb-server"
-
-        if benchmarker_config.network is None:
-            extra_hosts = {
-                socket.gethostname(): str(benchmarker_config.server_host),
-                'tfb-server': str(benchmarker_config.server_host),
-                'tfb-database': str(benchmarker_config.database_host)
-            }
-            name = None
+                    os.path.join(run_log_dir, "%s.log" % test.name.lower()),
+                    'w') as run_log:
+                tb = traceback.format_exc()
+                log("Running docker cointainer: %s.dockerfile failed" % test.name,
+                    prefix=log_prefix,
+                    file=run_log)
+                log(tb, prefix=log_prefix, file=run_log)

-        sysctl = {'net.core.somaxconn': 65535}
+        return container

-        ulimit = [{
-            'name': 'nofile',
-            'hard': 200000,
-            'soft': 200000
-        }, {
-            'name': 'rtprio',
-            'hard': 99,
-            'soft': 99
-        }]
-
-        container = client.containers.run(
-            "techempower/tfb.test.%s" % test.name,
-            name=name,
-            network=benchmarker_config.network,
-            network_mode=benchmarker_config.network_mode,
-            stderr=True,
+    @staticmethod
+    def __stop_container(container, kill):
+        try:
+            if kill:
+                container.kill()
+            else:
+                container.stop()
+        except:
+            # container has already been stopped/killed
+            pass
+
+    @staticmethod
+    def __stop_all(docker_client, kill):
+        for container in docker_client.containers.list():
+            if len(container.image.tags) > 0 \
+                    and 'techempower' in container.image.tags[0] \
+                    and 'tfb:latest' not in container.image.tags[0]:
+                DockerHelper.__stop_container(container, kill)
+
+    def stop(self, containers=None, kill=False):
+        '''
+        Attempts to stop a container or list of containers.
+        If no containers are passed, stops all running containers.
+        '''
+        is_multi_setup = self.benchmarker.config.server_docker_host != \
+                         self.benchmarker.config.database_docker_host
+
+        if containers:
+            if not isinstance(containers, list):
+                containers = [containers]
+            for container in containers:
+                DockerHelper.__stop_container(container, kill)
+        else:
+            self.__stop_all(self.server, kill)
+            if is_multi_setup:
+                self.__stop_all(self.database, kill)
+
+        self.database.containers.prune()
+        if is_multi_setup:
+            # Then we're on a 3 machine set up
+            self.server.containers.prune()
+            self.client.containers.prune()
+
+    def start_database(self, database):
+        '''
+        Sets up a container for the given database and port, and starts said docker
+        container.
+        '''
+        image_name = "techempower/%s:latest" % database
+        log_prefix = image_name + ": "
+
+        database_dir = os.path.join(self.benchmarker.config.db_root, database)
+        docker_file = "%s.dockerfile" % database
+
+        self.__build(
+            base_url=self.benchmarker.config.database_docker_host,
+            path=database_dir,
+            dockerfile=docker_file,
+            log_prefix=log_prefix,
+            build_log_file=os.devnull,
+            tag="techempower/%s" % database)
+
+        sysctl = {'net.core.somaxconn': 65535, 'kernel.sem': "250 32000 256 512"}
+
+        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
+
+        container = self.database.containers.run(
+            "techempower/%s" % database,
+            name="tfb-database",
+            network=self.benchmarker.config.network,
+            network_mode=self.benchmarker.config.network_mode,
             detach=True,
             detach=True,
-            init=True,
-            extra_hosts=extra_hosts,
-            privileged=True,
             ulimits=ulimit,
             ulimits=ulimit,
             sysctls=sysctl,
             sysctls=sysctl,
             remove=True,
             remove=True,
             log_config={'type': None})
             log_config={'type': None})
 
 
-        watch_thread = Thread(
-            target=watch_container,
-            args=(
-                container,
-                "%s.dockerfile" % test.name,
-            ))
-        watch_thread.daemon = True
-        watch_thread.start()
-
-    except Exception:
-        with open(
-                os.path.join(run_log_dir, "%s.log" % test.name.lower()),
-                'w') as run_log:
-            tb = traceback.format_exc()
-            log("Running docker cointainer: %s.dockerfile failed" % test.name,
-                prefix=log_prefix,
-                file=run_log)
-            log(tb, prefix=log_prefix, file=run_log)
-
-    return container
-
-
-def stop(benchmarker_config=None,
-         container=None,
-         database_container=None,
-         test=None):
-    '''
-    Attempts to stop the running test container.
-    '''
-    client = docker.DockerClient(
-        base_url=benchmarker_config.server_docker_host)
-    if container is None:
-        for container in client.containers.list():
-            if len(
-                    container.image.tags
-            ) > 0 and 'techempower' in container.image.tags[0] and 'tfb:latest' not in container.image.tags[0]:
-                container.stop()
-    else:
-        # Stop the running container
+        # Sleep until the database accepts connections
+        slept = 0
+        max_sleep = 60
+        database_ready = False
+        while not database_ready and slept < max_sleep:
+            time.sleep(1)
+            slept += 1
+            database_ready = test_database(self.benchmarker.config, database)
+
+        if not database_ready:
+            log("Database was not ready after startup", prefix=log_prefix)
+
+        return container
+
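
The sleep-until-ready loop above generalizes to any readiness probe; a minimal sketch (the helper name is invented, and test_database in the usage comment is assumed to be the probe the toolset imports elsewhere):

    import time

    def wait_until_ready(probe, max_sleep=60):
        # Poll once per second until the probe reports ready or time runs out.
        slept = 0
        while slept < max_sleep:
            time.sleep(1)
            slept += 1
            if probe():
                return True
        return False

    # e.g. wait_until_ready(lambda: test_database(config, 'postgres'))
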
+    def build_wrk(self):
+        '''
+        Builds the techempower/tfb.wrk container
+        '''
+        self.__build(
+            base_url=self.benchmarker.config.client_docker_host,
+            path=self.benchmarker.config.wrk_root,
+            dockerfile="wrk.dockerfile",
+            log_prefix="wrk: ",
+            build_log_file=os.devnull,
+            tag="techempower/tfb.wrk")
+
+    def test_client_connection(self, url):
+        '''
+        Tests that the app server at the given url responds successfully to a
+        request.
+        '''
         try:
         try:
-            container.stop()
+            self.client.containers.run(
+                'techempower/tfb.wrk',
+                'curl %s' % url,
+                remove=True,
+                log_config={'type': None},
+                network=self.benchmarker.config.network,
+                network_mode=self.benchmarker.config.network_mode)
         except Exception:
         except Exception:
-            # Suppress "No such container" errors
-            pass
+            return False
 
 
-    database_client = docker.DockerClient(
-        base_url=benchmarker_config.database_docker_host)
-    # Stop the database container
-    if database_container is None:
-        for container in database_client.containers.list():
-            if len(
-                    container.image.tags
-            ) > 0 and 'techempower' in container.image.tags[0] and 'tfb:latest' not in container.image.tags[0]:
-                container.stop()
-    else:
-        try:
-            database_container.stop()
-        except Exception:
-            # Suppress "No such container" errors
-            pass
-
-    client.containers.prune()
-
-    if benchmarker_config.server_docker_host != benchmarker_config.database_docker_host:
-        database_client.containers.prune()
-
-
-def find(path, pattern):
-    '''
-    Finds and returns all the the files matching the given pattern recursively in
-    the given path.
-    '''
-    for root, dirs, files in os.walk(path):
-        for name in files:
-            if fnmatch.fnmatch(name, pattern):
-                return os.path.join(root, name)
-
-
-def start_database(benchmarker_config, database):
-    '''
-    Sets up a container for the given database and port, and starts said docker
-    container.
-    '''
-    image_name = "techempower/%s:latest" % database
-    log_prefix = image_name + ": "
-
-    database_dir = os.path.join(benchmarker_config.fwroot, "toolset",
-                                "databases", database)
-    docker_file = "%s.dockerfile" % database
-
-    __build(
-        base_url=benchmarker_config.database_docker_host,
-        path=database_dir,
-        dockerfile=docker_file,
-        log_prefix=log_prefix,
-        build_log_file=os.devnull,
-        tag="techempower/%s" % database)
-
-    client = docker.DockerClient(
-        base_url=benchmarker_config.database_docker_host)
-
-    sysctl = {'net.core.somaxconn': 65535, 'kernel.sem': "250 32000 256 512"}
-
-    ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
-
-    container = client.containers.run(
-        "techempower/%s" % database,
-        name="tfb-database",
-        network=benchmarker_config.network,
-        network_mode=benchmarker_config.network_mode,
-        detach=True,
-        ulimits=ulimit,
-        sysctls=sysctl,
-        remove=True,
-        log_config={'type': None})
-
-    # Sleep until the database accepts connections
-    slept = 0
-    max_sleep = 60
-    database_ready = False
-    while not database_ready and slept < max_sleep:
-        time.sleep(1)
-        slept += 1
-        database_ready = test_database(benchmarker_config, database)
-
-    if not database_ready:
-        log("Database was not ready after startup", prefix=log_prefix)
-
-    return container
-
-
-def build_wrk(benchmarker_config):
-    '''
-    Builds the techempower/tfb.wrk container
-    '''
-    __build(
-        base_url=benchmarker_config.client_docker_host,
-        path=os.path.join(benchmarker_config.fwroot, "toolset", "wrk"),
-        dockerfile="wrk.dockerfile",
-        log_prefix="wrk: ",
-        build_log_file=os.devnull,
-        tag="techempower/tfb.wrk")
-
-
-def test_client_connection(benchmarker_config, url):
-    '''
-    Tests that the app server at the given url responds successfully to a
-    request.
-    '''
-    client = docker.DockerClient(
-        base_url=benchmarker_config.client_docker_host)
-
-    try:
-        client.containers.run(
-            'techempower/tfb.wrk',
-            'curl %s' % url,
-            remove=True,
-            log_config={'type': None},
-            network=benchmarker_config.network,
-            network_mode=benchmarker_config.network_mode)
-    except:
-        return False
-
-    return True
-
-
-def server_container_exists(benchmarker_config, container_id_or_name):
-    '''
-    Returns True if the container still exists on the server.
-    '''
-    client = docker.DockerClient(
-        base_url=benchmarker_config.server_docker_host)
-    try:
-        client.containers.get(container_id_or_name)
         return True
         return True
-    except:
-        return False
-
 
 
-def benchmark(benchmarker_config, script, variables, raw_file):
-    '''
-    Runs the given remote_script on the wrk container on the client machine.
-    '''
-
-    def watch_container(container, raw_file):
-        with open(raw_file, 'w') as benchmark_file:
-            for line in container.logs(stream=True):
-                log(line, file=benchmark_file)
+    def server_container_exists(self, container_id_or_name):
+        '''
+        Returns True if the container still exists on the server.
+        '''
+        try:
+            self.server.containers.get(container_id_or_name)
+            return True
+        except:
+            return False
 
 
-    client = docker.DockerClient(
-        base_url=benchmarker_config.client_docker_host)
+    def benchmark(self, script, variables, raw_file):
+        '''
+        Runs the given remote_script on the wrk container on the client machine.
+        '''
 
 
-    sysctl = {'net.core.somaxconn': 65535}
+        def watch_container(container):
+            with open(raw_file, 'w') as benchmark_file:
+                for line in container.logs(stream=True):
+                    log(line, file=benchmark_file)
 
 
-    ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
+        sysctl = {'net.core.somaxconn': 65535}
 
 
-    watch_container(
-        client.containers.run(
-            "techempower/tfb.wrk",
-            "/bin/bash %s" % script,
-            environment=variables,
-            network=benchmarker_config.network,
-            network_mode=benchmarker_config.network_mode,
-            detach=True,
-            stderr=True,
-            ulimits=ulimit,
-            sysctls=sysctl,
-            remove=True,
-            log_config={'type': None}), raw_file)
+        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
+
+        watch_container(
+            self.client.containers.run(
+                "techempower/tfb.wrk",
+                "/bin/bash /%s" % script,
+                environment=variables,
+                network=self.benchmarker.config.network,
+                network_mode=self.benchmarker.config.network_mode,
+                detach=True,
+                stderr=True,
+                ulimits=ulimit,
+                sysctls=sysctl,
+                remove=True,
+                log_config={'type': None}))
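
benchmark() and its nested watch_container() boil down to one pattern: run the container detached and stream its output into a file. A condensed, standalone sketch (the image and command in the usage comment are placeholders):

    import docker

    def run_and_capture(client, image, command, raw_file, **run_kwargs):
        # Run detached so logs can be streamed as they arrive, then write the
        # combined output into raw_file for later parsing.
        container = client.containers.run(
            image, command, detach=True, remove=True, **run_kwargs)
        with open(raw_file, 'wb') as out:
            for line in container.logs(stream=True):
                out.write(line)

    # run_and_capture(docker.from_env(), 'techempower/tfb.wrk',
    #                 '/bin/bash /pipeline.sh', 'raw.txt')
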

+ 411 - 0
toolset/utils/metadata.py

@@ -0,0 +1,411 @@
+import os
+import glob
+import json
+
+from collections import OrderedDict
+
+from toolset.utils.output_helper import log
+from colorama import Fore
+
+
+class Metadata:
+
+    supported_dbs = [
+        ('MySQL',
+         'One of the most popular databases around the web and in TFB'),
+        ('Postgres',
+         'An advanced SQL database with a larger feature set than MySQL'),
+        ('MongoDB', 'A popular document-store database')]
+
+    def __init__(self, benchmarker=None):
+        self.benchmarker = benchmarker
+
+    def gather_languages(self):
+        '''
+        Gathers all the known languages in the suite via the folder names
+        beneath FWROOT.
+        '''
+
+        lang_dir = os.path.join(self.benchmarker.config.lang_root)
+        langs = []
+        for dir in glob.glob(os.path.join(lang_dir, "*")):
+            langs.append(dir.replace(lang_dir, "")[1:])
+        return langs
+
+    def gather_language_tests(self, language):
+        '''
+        Gathers all the test names from a known language
+        '''
+        try:
+            dir = os.path.join(self.benchmarker.config.lang_root, language)
+            tests = map(lambda x: os.path.join(language, x), os.listdir(dir))
+            return filter(lambda x: os.path.isdir(
+                os.path.join(self.benchmarker.config.lang_root, x)), tests)
+        except Exception:
+            raise Exception(
+                "Unable to locate language directory: {!s}".format(language))
+
+    def get_framework_config(self, test_dir):
+        '''
+        Gets a framework's benchmark_config from the given
+        test directory
+        '''
+        dir_config_files = glob.glob(
+            "{!s}/{!s}/benchmark_config.json".format(
+                self.benchmarker.config.lang_root, test_dir))
+        if len(dir_config_files):
+            return dir_config_files[0]
+        else:
+            raise Exception(
+                "Unable to locate tests in test-dir: {!s}".format(
+                    test_dir))
+
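
get_framework_config and the fallback branch of gather_tests below share one discovery rule; isolated as a sketch (the function name is invented):

    import glob
    import os

    def find_benchmark_configs(lang_root, test_dirs=None):
        # Either look inside the requested test directories, or walk every
        # <lang_root>/<language>/<test>/benchmark_config.json in the tree.
        if test_dirs:
            found = []
            for test_dir in test_dirs:
                found.extend(glob.glob(
                    os.path.join(lang_root, test_dir, 'benchmark_config.json')))
            return found
        return glob.glob(
            os.path.join(lang_root, '*', '*', 'benchmark_config.json'))
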
+    def gather_tests(self, include=None, exclude=None):
+        '''
+        Given test names as strings, returns a list of FrameworkTest objects.
+        For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
+        variables for checking the test directory, the test database os, and
+        other useful items.
+
+        With no arguments, every test in this framework will be returned.
+        With include, only tests with this exact name will be returned.
+        With exclude, all tests but those excluded will be returned.
+        '''
+
+        # Help callers out a bit
+        include = include or []
+        exclude = exclude or []
+
+        # Search for configuration files
+        config_files = []
+
+        if self.benchmarker.config.test_lang:
+            self.benchmarker.config.test_dir = []
+            for lang in self.benchmarker.config.test_lang:
+                self.benchmarker.config.test_dir.extend(
+                    self.gather_language_tests(lang))
+
+        if self.benchmarker.config.test_dir:
+            for test_dir in self.benchmarker.config.test_dir:
+                config_files.append(self.get_framework_config(test_dir))
+        else:
+            config_files.extend(
+                glob.glob("{!s}/*/*/benchmark_config.json".format(
+                    self.benchmarker.config.lang_root)))
+
+        tests = []
+        for config_file_name in config_files:
+            config = None
+            with open(config_file_name, 'r') as config_file:
+                try:
+                    config = json.load(config_file)
+                except ValueError:
+                    log("Error loading config: {!s}".format(config_file_name),
+                        color=Fore.RED)
+                    raise Exception("Error loading config file")
+
+            # Find all tests in the config file
+            config_tests = self.parse_config(config, os.path.dirname(config_file_name))
+
+            # Filter
+            for test in config_tests:
+                if len(include) == 0 and len(exclude) == 0:
+                    # No filters, we are running everything
+                    tests.append(test)
+                elif test.name in include:
+                    tests.append(test)
+
+        # Ensure we were able to locate everything that was
+        # explicitly included
+        if len(include):
+            names = {test.name for test in tests}
+            if len(set(include) - set(names)):
+                missing = list(set(include) - set(names))
+                raise Exception("Unable to locate tests %s" % missing)
+
+        tests.sort(key=lambda x: x.name)
+        return tests
+
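
The include filter and the missing-test check reduce to a few lines; a self-contained sketch over objects with a .name attribute (the helper name is invented):

    def filter_by_include(tests, include):
        # gather_tests' rule: an empty include list keeps everything;
        # otherwise keep exact name matches and fail loudly on names that
        # matched nothing.
        if not include:
            return list(tests)
        picked = [t for t in tests if t.name in include]
        missing = set(include) - set(t.name for t in picked)
        if missing:
            raise Exception("Unable to locate tests %s" % sorted(missing))
        return picked
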
+    def tests_to_run(self):
+        '''
+        Gathers all tests for current benchmark run.
+        '''
+        return self.gather_tests(
+            self.benchmarker.config.test,
+            self.benchmarker.config.exclude)
+
+    def gather_frameworks(self, include=None, exclude=None):
+        '''
+        Return a dictionary mapping frameworks->[test1,test2,test3]
+        for quickly grabbing all tests in a grouped manner.
+        Args have the same meaning as gather_tests
+        '''
+        tests = self.gather_tests(include, exclude)
+        frameworks = dict()
+
+        for test in tests:
+            if test.framework not in frameworks:
+                frameworks[test.framework] = []
+            frameworks[test.framework].append(test)
+        return frameworks
+
+    def has_file(self, test_dir, filename):
+        '''
+        Returns True if the file exists in the test dir
+        '''
+        path = test_dir
+        if self.benchmarker.config.lang_root not in path:
+            path = os.path.join(self.benchmarker.config.lang_root, path)
+        return os.path.isfile("{!s}/{!s}".format(path, filename))
+
+    @staticmethod
+    def test_order(type_name):
+        """
+        This sort ordering is set up specifically to return the length
+        of the test name. There were SO many problems involved with
+        'plaintext' being run first (rather, just not last) that we
+        needed to ensure that it was run last for every framework.
+        """
+        return len(type_name)
+
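
A worked example of the length-based ordering with a handful of typical test-type names (the list itself is illustrative):

    # sorted() with key=len is stable, so shorter names run first and the
    # longest name here -- 'plaintext' -- runs last.
    types = ['json', 'db', 'query', 'fortune', 'update', 'plaintext']
    print(sorted(types, key=len))
    # ['db', 'json', 'query', 'update', 'fortune', 'plaintext']
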
+    def parse_config(self, config, directory):
+        """
+        Parses a config file into a list of FrameworkTest objects
+        """
+        from toolset.benchmark.framework_test import FrameworkTest
+        tests = []
+
+        # The config object can specify multiple tests
+        # Loop over them and parse each into a FrameworkTest
+        for test in config['tests']:
+
+            tests_to_run = [name for (name, keys) in test.iteritems()]
+            if "default" not in tests_to_run:
+                log("Framework %s does not define a default test in benchmark_config.json"
+                    % config['framework'], color=Fore.YELLOW)
+
+            # Check that each test configuration is acceptable
+            # Throw exceptions if a field is missing, or how to improve the field
+            for test_name, test_keys in test.iteritems():
+                # Validates and normalizes the benchmark_config entry
+                test_keys = Metadata.validate_test(test_name, test_keys, directory)
+
+                # Map test type to a parsed FrameworkTestType object
+                runTests = dict()
+                for type_name, type_obj in self.benchmarker.config.types.iteritems():
+                    try:
+                        # Makes a FrameWorkTestType object using some of the keys in config
+                        # e.g. JsonTestType uses "json_url"
+                        runTests[type_name] = type_obj.copy().parse(test_keys)
+                    except AttributeError:
+                        # This is quite common - most tests don't support all types
+                        # Quitely log it and move on (debug logging is on in travis and this causes
+                        # Quietly log it and move on (debug logging is on in travis and this causes
+                        # ~1500 lines of debug, so I'm totally ignoring it for now)
+                        pass
+
+                # We need to sort by test_type to run
+                sortedTestKeys = sorted(runTests.keys(), key=Metadata.test_order)
+                sortedRunTests = OrderedDict()
+                for sortedTestKey in sortedTestKeys:
+                    sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
+
+                # Prefix all test names with framework except 'default' test
+                # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
+                if test_name == 'default':
+                    test_name = config['framework']
+                else:
+                    test_name = "%s-%s" % (config['framework'], test_name)
+
+                # By passing the entire set of keys, each FrameworkTest will have a member for each key
+                tests.append(
+                    FrameworkTest(test_name, directory, self.benchmarker,
+                                  sortedRunTests, test_keys))
+
+        return tests
+
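
The test-name prefixing rule near the end of parse_config, isolated (framework and test names in the comments are hypothetical):

    def full_test_name(framework, test_name):
        # 'default' keeps the bare framework name; every other entry gets a
        # framework- prefix.
        if test_name == 'default':
            return framework
        return "%s-%s" % (framework, test_name)

    # full_test_name('myframework', 'default')  -> 'myframework'
    # full_test_name('myframework', 'postgres') -> 'myframework-postgres'
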
+    def list_test_metadata(self):
+        '''
+        Prints the metadata for all the available tests
+        '''
+        all_tests = self.gather_tests()
+        all_tests_json = json.dumps(map(lambda test: {
+            "name": test.name,
+            "approach": test.approach,
+            "classification": test.classification,
+            "database": test.database,
+            "framework": test.framework,
+            "language": test.language,
+            "orm": test.orm,
+            "platform": test.platform,
+            "webserver": test.webserver,
+            "os": test.os,
+            "database_os": test.database_os,
+            "display_name": test.display_name,
+            "notes": test.notes,
+            "versus": test.versus
+        }, all_tests))
+
+        with open(
+                os.path.join(self.benchmarker.results.directory, "test_metadata.json"),
+                "w") as f:
+            f.write(all_tests_json)
+
+    @staticmethod
+    def validate_test(test_name, test_keys, directory):
+        """
+        Validates and normalizes benchmark config values for this test based on a schema
+        """
+        recommended_lang = directory.split('/')[-2]
+        windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
+        schema = {
+            'language': {
+                # Language is the only key right now with no 'allowed' key that can't
+                # have a "None" value
+                'required': True,
+                'help':
+                    ('language', 'The language of the framework used, suggestion: %s' %
+                     recommended_lang)
+            },
+            'webserver': {
+                'help':
+                    ('webserver',
+                     'Name of the webserver also referred to as the "front-end server"'
+                     )
+            },
+            'classification': {
+                'allowed': [('Fullstack', '...'), ('Micro', '...'), ('Platform',
+                                                                     '...')]
+            },
+            'database': {
+                'allowed':
+                    Metadata.supported_dbs +
+                    [('None',
+                      'No database was used for these tests, as is the case with Json Serialization and Plaintext'
+                      )]
+            },
+            'approach': {
+                'allowed': [('Realistic', '...'), ('Stripped', '...')]
+            },
+            'orm': {
+                'required_with': 'database',
+                'allowed':
+                    [('Full',
+                      'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'
+                      ),
+                     ('Micro',
+                      'Has basic database driver capabilities such as establishing a connection and sending queries.'
+                      ),
+                     ('Raw',
+                      'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.'
+                      )]
+            },
+            'platform': {
+                'help':
+                    ('platform',
+                     'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...'
+                     )
+            },
+            'framework': {
+                # Guaranteed to be here and correct at this point
+                # key is left here to produce the set of required keys
+            },
+            'os': {
+                'allowed':
+                    [('Linux',
+                      'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
+                      ),
+                     ('Windows',
+                      'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
+                      % windows_url)]
+            },
+            'database_os': {
+                'required_with': 'database',
+                'allowed':
+                    [('Linux',
+                      'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
+                      ),
+                     ('Windows',
+                      'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
+                      % windows_url)]
+            }
+        }
+
+        # Check the (all optional) test urls
+        Metadata.validate_urls(test_name, test_keys)
+
+        def get_test_val(k):
+            return test_keys.get(k, "none").lower()
+
+        def throw_incorrect_key(k):
+            msg = (
+                    "Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
+                    % (k, test_name, test_keys['framework']))
+            helpinfo = ('\n').join([
+                "  `%s` -- %s" % (v, desc)
+                for (v, desc) in zip(acceptable_values, descriptors)
+            ])
+            fullerr = msg + helpinfo + "\n"
+            raise Exception(fullerr)
+
+        # Check values of keys against schema
+        for key in schema.keys():
+            val = get_test_val(key)
+            test_keys[key] = val
+
+            if val == "none":
+                # incorrect if key requires a value other than none
+                if schema[key].get('required', False):
+                    throw_incorrect_key(key)
+                # certain keys are only required if another key is not none
+                if 'required_with' in schema[key]:
+                    if get_test_val(schema[key]['required_with']) == "none":
+                        continue
+                    else:
+                        throw_incorrect_key(key)
+
+            # if we're here, the key needs to be one of the "allowed" values
+            if 'allowed' in schema[key]:
+                allowed = schema[key].get('allowed', [])
+                acceptable_values, descriptors = zip(*allowed)
+                acceptable_values = [a.lower() for a in acceptable_values]
+
+                if val not in acceptable_values:
+                    throw_incorrect_key(key)
+
+        return test_keys
+
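
The heart of the schema check is the allowed-values comparison; a minimal standalone version (the function name is invented and the allowed list is truncated):

    def check_allowed(key, value, allowed):
        # Lower-case the configured value and require it to be one of the
        # (value, description) pairs the schema permits.
        acceptable = [v.lower() for v, _ in allowed]
        if value.lower() not in acceptable:
            raise Exception("Invalid `%s` value %r; allowed: %s"
                            % (key, value, acceptable))

    # check_allowed('database', 'Postgres',
    #               [('MySQL', '...'), ('Postgres', '...'), ('MongoDB', '...')])
    # passes, while an unlisted value such as 'Oracle' raises.
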
+    @staticmethod
+    def validate_urls(test_name, test_keys):
+        """
+        Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
+        empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
+        the suggested url specifications, although those suggestions are presented if a url fails validation here.
+        """
+        example_urls = {
+            "json_url":
+                "/json",
+            "db_url":
+                "/mysql/db",
+            "query_url":
+                "/mysql/queries?queries=  or  /mysql/queries/",
+            "fortune_url":
+                "/mysql/fortunes",
+            "update_url":
+                "/mysql/updates?queries=  or  /mysql/updates/",
+            "plaintext_url":
+                "/plaintext",
+            "cached_query_url":
+                "/mysql/cached_queries?queries=  or /mysql/cached_queries"
+        }
+
+        for test_url in [
+            "json_url", "db_url", "query_url", "fortune_url", "update_url",
+            "plaintext_url", "cached_query_url"
+        ]:
+            key_value = test_keys.get(test_url, None)
+            if key_value is not None and not key_value.startswith('/'):
+                errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
+            Example `%s` url: \"%s\"
+          """ % (test_url, test_name, key_value, test_url, example_urls[test_url])
+                raise Exception(errmsg)
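
validate_urls enforces exactly one rule per *_url key; in isolation:

    def url_is_acceptable(value):
        # Absent keys are fine; present keys must start with '/'.
        return value is None or value.startswith('/')

    # url_is_acceptable('/json') -> True
    # url_is_acceptable('json')  -> False  (validate_urls would raise here)
    # url_is_acceptable(None)    -> True   (all of these urls are optional)
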

+ 0 - 379
toolset/utils/metadata_helper.py

@@ -1,379 +0,0 @@
-import os
-import glob
-import json
-
-from collections import OrderedDict
-
-from toolset.utils.output_helper import log
-from colorama import Fore
-
-
-def gather_langauges(benchmarker_config):
-    '''
-    Gathers all the known languages in the suite via the folder names
-    beneath FWROOT.
-    '''
-
-    lang_dir = os.path.join(benchmarker_config.fwroot, "frameworks")
-    langs = []
-    for dir in glob.glob(os.path.join(lang_dir, "*")):
-        langs.append(dir.replace(lang_dir, "")[1:])
-    return langs
-
-
-def gather_tests(include=[], exclude=[], benchmarker_config=None,
-                 results=None):
-    '''
-    Given test names as strings, returns a list of FrameworkTest objects.
-    For example, 'aspnet-mysql-raw' turns into a FrameworkTest object with
-    variables for checking the test directory, the test database os, and
-    other useful items.
-
-    With no arguments, every test in this framework will be returned.
-    With include, only tests with this exact name will be returned.
-    With exclude, all tests but those excluded will be returned.
-
-    A config is needed to construct full FrameworkTest objects. If
-    one is not provided, a default config will be created.
-    '''
-
-    # Help callers out a bit
-    if include is None:
-        include = []
-    if exclude is None:
-        exclude = []
-
-    # Old, hacky method to exclude all tests was to
-    # request a test known to not exist, such as ''.
-    # If test '' was requested, short-circuit and return
-    # nothing immediately
-    if len(include) == 1 and '' in include:
-        return []
-
-    # Search for configuration files
-    config_files = []
-
-    if benchmarker_config.test_lang:
-        benchmarker_config.test_dir = []
-        for lang in benchmarker_config.test_lang:
-            if os.path.exists("{!s}/frameworks/{!s}".format(
-                    benchmarker_config.fwroot, lang)):
-                for test_dir in os.listdir("{!s}/frameworks/{!s}".format(
-                        benchmarker_config.fwroot, lang)):
-                    benchmarker_config.test_dir.append("{!s}/{!s}".format(
-                        lang, test_dir))
-            else:
-                raise Exception(
-                    "Unable to locate language directory: {!s}".format(lang))
-
-    if benchmarker_config.test_dir:
-        for test_dir in benchmarker_config.test_dir:
-            dir_config_files = glob.glob(
-                "{!s}/frameworks/{!s}/benchmark_config.json".format(
-                    benchmarker_config.fwroot, test_dir))
-            if len(dir_config_files):
-                config_files.extend(dir_config_files)
-            else:
-                raise Exception(
-                    "Unable to locate tests in test-dir: {!s}".format(
-                        test_dir))
-    else:
-        config_files.extend(
-            glob.glob("{!s}/frameworks/*/*/benchmark_config.json".format(
-                benchmarker_config.fwroot)))
-
-    tests = []
-    for config_file_name in config_files:
-        config = None
-        with open(config_file_name, 'r') as config_file:
-            try:
-                config = json.load(config_file)
-            except ValueError:
-                log("Error loading config: {!s}".format(config_file_name),
-                    color=Fore.RED)
-                raise Exception("Error loading config file")
-
-        # Find all tests in the config file
-        config_tests = parse_config(config, os.path.dirname(config_file_name),
-                                    benchmarker_config, results)
-
-        # Filter
-        for test in config_tests:
-            if len(include) is 0 and len(exclude) is 0:
-                # No filters, we are running everything
-                tests.append(test)
-            elif test.name in exclude:
-                continue
-            elif test.name in include:
-                tests.append(test)
-            else:
-                # An include list exists, but this test is
-                # not listed there, so we ignore it
-                pass
-
-    # Ensure we were able to locate everything that was
-    # explicitly included
-    if 0 != len(include):
-        names = {test.name for test in tests}
-        if 0 != len(set(include) - set(names)):
-            missing = list(set(include) - set(names))
-            raise Exception("Unable to locate tests %s" % missing)
-
-    tests.sort(key=lambda x: x.name)
-    return tests
-
-
-def gather_remaining_tests(config, results):
-    '''
-    Gathers the tests remaining in a current benchmark run.
-    '''
-    return gather_tests(config.test, config.exclude, config, results)
-
-
-def gather_frameworks(include=[], exclude=[], config=None):
-    '''
-    Return a dictionary mapping frameworks->[test1,test2,test3]
-    for quickly grabbing all tests in a grouped manner.
-    Args have the same meaning as gather_tests
-    '''
-    tests = gather_tests(include, exclude, config)
-    frameworks = dict()
-
-    for test in tests:
-        if test.framework not in frameworks:
-            frameworks[test.framework] = []
-        frameworks[test.framework].append(test)
-    return frameworks
-
-
-def test_order(type_name):
-    """
-    This sort ordering is set up specifically to return the length
-    of the test name. There were SO many problems involved with
-    'plaintext' being run first (rather, just not last) that we
-    needed to ensure that it was run last for every framework.
-    """
-    return len(type_name)
-
-
-def parse_config(config, directory, benchmarker_config, results):
-    """
-    Parses a config file into a list of FrameworkTest objects
-    """
-    from toolset.benchmark.framework_test import FrameworkTest
-    tests = []
-
-    # The config object can specify multiple tests
-    # Loop over them and parse each into a FrameworkTest
-    for test in config['tests']:
-
-        tests_to_run = [name for (name, keys) in test.iteritems()]
-        if "default" not in tests_to_run:
-            log("Framework %s does not define a default test in benchmark_config.json"
-                % config['framework'])
-
-        # Check that each test configuration is acceptable
-        # Throw exceptions if a field is missing, or how to improve the field
-        for test_name, test_keys in test.iteritems():
-            # Validates the benchmark_config entry
-            validate_test(test_name, test_keys, directory)
-
-            # Map test type to a parsed FrameworkTestType object
-            runTests = dict()
-            for type_name, type_obj in benchmarker_config.types.iteritems():
-                try:
-                    # Makes a FrameWorkTestType object using some of the keys in config
-                    # e.g. JsonTestType uses "json_url"
-                    runTests[type_name] = type_obj.copy().parse(test_keys)
-                except AttributeError:
-                    # This is quite common - most tests don't support all types
-                    # Quitely log it and move on (debug logging is on in travis and this causes
-                    # ~1500 lines of debug, so I'm totally ignoring it for now
-                    # log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
-                    pass
-
-            # We need to sort by test_type to run
-            sortedTestKeys = sorted(runTests.keys(), key=test_order)
-            sortedRunTests = OrderedDict()
-            for sortedTestKey in sortedTestKeys:
-                sortedRunTests[sortedTestKey] = runTests[sortedTestKey]
-
-            # Prefix all test names with framework except 'default' test
-            # Done at the end so we may still refer to the primary test as `default` in benchmark config error messages
-            if test_name == 'default':
-                test_name = config['framework']
-            else:
-                test_name = "%s-%s" % (config['framework'], test_name)
-
-            # By passing the entire set of keys, each FrameworkTest will have a member for each key
-            tests.append(
-                FrameworkTest(test_name, directory, benchmarker_config,
-                              results, sortedRunTests, test_keys))
-
-    return tests
-
-
-def validate_test(test_name, test_keys, directory):
-    """
-    Validate benchmark config values for this test based on a schema
-    """
-    recommended_lang = directory.split('/')[-2]
-    windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/issues/1038"
-    schema = {
-        'language': {
-            'help':
-            ('language', 'The language of the framework used, suggestion: %s' %
-             recommended_lang)
-        },
-        'webserver': {
-            'help':
-            ('webserver',
-             'Name of the webserver also referred to as the "front-end server"'
-             )
-        },
-        'classification': {
-            'allowed': [('Fullstack', '...'), ('Micro', '...'), ('Platform',
-                                                                 '...')]
-        },
-        'database': {
-            'allowed':
-            [('MySQL',
-              'One of the most popular databases around the web and in TFB'),
-             ('Postgres',
-              'An advanced SQL database with a larger feature set than MySQL'),
-             ('MongoDB', 'A popular document-store database'),
-             ('Cassandra', 'A highly performant and scalable NoSQL database'),
-             ('Elasticsearch',
-              'A distributed RESTful search engine that is used as a database for TFB tests'
-              ),
-             ('Redis',
-              'An open-sourced, BSD licensed, advanced key-value cache and store'
-              ),
-             ('SQLite',
-              'A network-less database, still supported for backwards compatibility'
-              ), ('SQLServer', 'Microsoft\'s SQL implementation'),
-             ('None',
-              'No database was used for these tests, as is the case with Json Serialization and Plaintext'
-              )]
-        },
-        'approach': {
-            'allowed': [('Realistic', '...'), ('Stripped', '...')]
-        },
-        'orm': {
-            'allowed':
-            [('Full',
-              'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'
-              ),
-             ('Micro',
-              'Has basic database driver capabilities such as establishing a connection and sending queries.'
-              ),
-             ('Raw',
-              'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.'
-              )]
-        },
-        'platform': {
-            'help':
-            ('platform',
-             'Name of the platform this framework runs on, e.g. Node.js, PyPy, hhvm, JRuby ...'
-             )
-        },
-        'framework': {
-            # Guranteed to be here and correct at this point
-            # key is left here to produce the set of required keys
-        },
-        'os': {
-            'allowed':
-            [('Linux',
-              'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
-              ),
-             ('Windows',
-              'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
-              % windows_url)]
-        },
-        'database_os': {
-            'allowed':
-            [('Linux',
-              'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'
-              ),
-             ('Windows',
-              'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s'
-              % windows_url)]
-        }
-    }
-
-    # Confirm required keys are present
-    required_keys = schema.keys()
-    missing = list(set(required_keys) - set(test_keys))
-
-    if len(missing) > 0:
-        missingstr = (", ").join(map(str, missing))
-        raise Exception(
-            "benchmark_config.json for test %s is invalid, please amend by adding the following required keys: [%s]"
-            % (test_name, missingstr))
-
-    # Check the (all optional) test urls
-    validate_urls(test_name, test_keys)
-
-    # Check values of keys against schema
-    for key in required_keys:
-        val = test_keys.get(key, "").lower()
-        has_predefined_acceptables = 'allowed' in schema[key]
-
-        if has_predefined_acceptables:
-            allowed = schema[key].get('allowed', [])
-            acceptable_values, descriptors = zip(*allowed)
-            acceptable_values = [a.lower() for a in acceptable_values]
-
-            if val not in acceptable_values:
-                msg = (
-                    "Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
-                    % (key, test_name, test_keys['framework']))
-                helpinfo = ('\n').join([
-                    "  `%s` -- %s" % (v, desc)
-                    for (v, desc) in zip(acceptable_values, descriptors)
-                ])
-                fullerr = msg + helpinfo + "\n"
-                raise Exception(fullerr)
-
-        elif not has_predefined_acceptables and val == "":
-            msg = (
-                "Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n"
-                % (key, test_name, test_keys['framework']))
-            helpinfo = "  %s -- %s" % schema[key]['help']
-            fullerr = msg + helpinfo + '\n'
-            raise Exception(fullerr)
-
-
-def validate_urls(test_name, test_keys):
-    """
-    Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
-    empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
-    the suggested url specifications, although those suggestions are presented if a url fails validation here.
-    """
-    example_urls = {
-        "json_url":
-        "/json",
-        "db_url":
-        "/mysql/db",
-        "query_url":
-        "/mysql/queries?queries=  or  /mysql/queries/",
-        "fortune_url":
-        "/mysql/fortunes",
-        "update_url":
-        "/mysql/updates?queries=  or  /mysql/updates/",
-        "plaintext_url":
-        "/plaintext",
-        "cached_query_url":
-        "/mysql/cached_queries?queries=  or /mysql/cached_queries"
-    }
-
-    for test_url in [
-            "json_url", "db_url", "query_url", "fortune_url", "update_url",
-            "plaintext_url", "cached_query_url"
-    ]:
-        key_value = test_keys.get(test_url, None)
-        if key_value != None and not key_value.startswith('/'):
-            errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
-        Example `%s` url: \"%s\"
-      """ % (test_url, test_name, key_value, test_url, example_urls[test_url])
-            raise Exception(errmsg)

+ 0 - 71
toolset/utils/ordered_set.py

@@ -1,71 +0,0 @@
-import collections
-
-
-class OrderedSet(collections.MutableSet):
-    '''
-    From https://code.activestate.com/recipes/576694/
-    '''
-
-    def __init__(self, iterable=None):
-        self.end = end = []
-        end += [None, end, end]  # sentinel node for doubly linked list
-        self.map = {}  # key --> [key, prev, next]
-        if iterable is not None:
-            self |= iterable
-
-    def __len__(self):
-        return len(self.map)
-
-    def __contains__(self, key):
-        return key in self.map
-
-    def add(self, key):
-        if key not in self.map:
-            end = self.end
-            curr = end[1]
-            curr[2] = end[1] = self.map[key] = [key, curr, end]
-
-    def discard(self, key):
-        if key in self.map:
-            key, prev, next = self.map.pop(key)
-            prev[2] = next
-            next[1] = prev
-
-    def __iter__(self):
-        end = self.end
-        curr = end[2]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[2]
-
-    def __reversed__(self):
-        end = self.end
-        curr = end[1]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[1]
-
-    def pop(self, last=True):
-        if not self:
-            raise KeyError('set is empty')
-        key = self.end[1][0] if last else self.end[2][0]
-        self.discard(key)
-        return key
-
-    def __repr__(self):
-        if not self:
-            return '%s()' % (self.__class__.__name__, )
-        return '%s(%r)' % (self.__class__.__name__, list(self))
-
-    def __eq__(self, other):
-        if isinstance(other, OrderedSet):
-            return len(self) == len(other) and list(self) == list(other)
-        return set(self) == set(other)
-
-
-if __name__ == '__main__':
-    s = OrderedSet('abracadaba')
-    t = OrderedSet('simsalabim')
-    print(s | t)
-    print(s & t)
-    print(s - t)

+ 23 - 27
toolset/utils/results_helper.py → toolset/utils/results.py

@@ -1,4 +1,3 @@
-from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
 from toolset.utils.output_helper import log
 from toolset.utils.output_helper import log
 
 
 import os
 import os
@@ -19,12 +18,13 @@ from colorama import Fore, Style
 
 
 
 
 class Results:
 class Results:
-    def __init__(self, config):
+    def __init__(self, benchmarker):
         '''
         '''
         Constructor
         Constructor
         '''
         '''
-        self.config = config
-        self.directory = os.path.join(self.config.fwroot, "results",
+        self.benchmarker = benchmarker
+        self.config = benchmarker.config
+        self.directory = os.path.join(self.config.results_root,
                                       self.config.timestamp)
                                       self.config.timestamp)
         try:
         try:
             os.makedirs(self.directory)
             os.makedirs(self.directory)
@@ -50,7 +50,7 @@ class Results:
         self.queryIntervals = self.config.query_levels
         self.queryIntervals = self.config.query_levels
         self.cachedQueryIntervals = self.config.cached_query_levels
         self.cachedQueryIntervals = self.config.cached_query_levels
         self.frameworks = [
         self.frameworks = [
-            t.name for t in gather_remaining_tests(self.config, self)
+            t.name for t in benchmarker.tests
         ]
         ]
         self.duration = self.config.duration
         self.duration = self.config.duration
         self.rawData = dict()
         self.rawData = dict()
@@ -226,7 +226,7 @@ class Results:
     def get_raw_file(self, test_name, test_type):
     def get_raw_file(self, test_name, test_type):
         '''
         '''
         Returns the output file for this test_name and test_type
         Returns the output file for this test_name and test_type
-        Example: fwroot/results/timestamp/test_type/test_name/raw.txt
+        Example: fw_root/results/timestamp/test_type/test_name/raw.txt
         '''
         '''
         path = os.path.join(self.directory, test_name, test_type, "raw.txt")
         path = os.path.join(self.directory, test_name, test_type, "raw.txt")
         try:
         try:
@@ -238,7 +238,7 @@ class Results:
     def get_stats_file(self, test_name, test_type):
     def get_stats_file(self, test_name, test_type):
         '''
         '''
         Returns the stats file name for this test_name and
         Returns the stats file name for this test_name and
-        Example: fwroot/results/timestamp/test_type/test_name/stats.txt
+        Example: fw_root/results/timestamp/test_type/test_name/stats.txt
         '''
         '''
         path = os.path.join(self.directory, test_name, test_type, "stats.txt")
         path = os.path.join(self.directory, test_name, test_type, "stats.txt")
         try:
         try:
@@ -250,7 +250,7 @@ class Results:
     def report_verify_results(self, framework_test, test_type, result):
     def report_verify_results(self, framework_test, test_type, result):
         '''
         '''
         Used by FrameworkTest to add verification details to our results
         Used by FrameworkTest to add verification details to our results
-        
+
         TODO: Technically this is an IPC violation - we are accessing
         TODO: Technically this is an IPC violation - we are accessing
         the parent process' memory from the child process
         the parent process' memory from the child process
         '''
         '''
@@ -261,7 +261,7 @@ class Results:
     def report_benchmark_results(self, framework_test, test_type, results):
     def report_benchmark_results(self, framework_test, test_type, results):
         '''
         '''
         Used by FrameworkTest to add benchmark data to this
         Used by FrameworkTest to add benchmark data to this
-        
+
         TODO: Technically this is an IPC violation - we are accessing
         TODO: Technically this is an IPC violation - we are accessing
         the parent process' memory from the child process
         the parent process' memory from the child process
         '''
         '''
@@ -285,7 +285,6 @@ class Results:
         Finishes these results.
         Finishes these results.
         '''
         '''
         if not self.config.parse:
         if not self.config.parse:
-            tests = gather_remaining_tests(self.config, self)
             # Normally you don't have to use Fore.BLUE before each line, but
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
             # or stream flush, so we have to ensure that the color code is printed repeatedly
@@ -293,11 +292,11 @@ class Results:
                 border='=',
                 border='=',
                 border_bottom='-',
                 border_bottom='-',
                 color=Fore.CYAN)
                 color=Fore.CYAN)
-            for test in tests:
+            for test in self.benchmarker.tests:
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                 if test.name in self.verify.keys():
                     for test_type, result in self.verify[
                     for test_type, result in self.verify[
-                            test.name].iteritems():
+                        test.name].iteritems():
                         if result.upper() == "PASS":
                         if result.upper() == "PASS":
                             color = Fore.GREEN
                             color = Fore.GREEN
                         elif result.upper() == "WARN":
                         elif result.upper() == "WARN":
@@ -311,8 +310,6 @@ class Results:
                         "NO RESULTS (Did framework launch?)")
                         "NO RESULTS (Did framework launch?)")
             log('', border='=', border_bottom='', color=Fore.CYAN)
             log('', border='=', border_bottom='', color=Fore.CYAN)
 
 
-        log("%sTime to complete: %s seconds" %
-            (Style.RESET_ALL, str(int(time.time() - self.config.start_time))))
         log("Results are saved in " + self.directory)
         log("Results are saved in " + self.directory)
 
 
     #############################################################################
     #############################################################################
@@ -356,8 +353,7 @@ class Results:
         '''
         '''
         Counts the significant lines of code for all tests and stores in results.
         Counts the significant lines of code for all tests and stores in results.
         '''
         '''
-        frameworks = gather_frameworks(self.config.test, self.config.exclude,
-                                       self.config)
+        frameworks = self.benchmarker.metadata.gather_frameworks(self.config.test, self.config.exclude)
 
 
         jsonResult = {}
         jsonResult = {}
         for framework, testlist in frameworks.items():
         for framework, testlist in frameworks.items():
@@ -402,8 +398,8 @@ class Results:
         '''
         '''
         Count the git commits for all the framework tests
         Count the git commits for all the framework tests
         '''
         '''
-        frameworks = gather_frameworks(self.config.test, self.config.exclude,
-                                       self.config)
+        frameworks = self.benchmarker.metadata.gather_frameworks(
+            self.config.test, self.config.exclude)
 
 
         def count_commit(directory, jsonResult):
         def count_commit(directory, jsonResult):
             command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
             command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
@@ -447,7 +443,7 @@ class Results:
         Get the git commit id for this benchmark
         Get the git commit id for this benchmark
         '''
         '''
         return subprocess.check_output(
         return subprocess.check_output(
-            ["git", "rev-parse", "HEAD"], cwd=self.config.fwroot).strip()
+            ["git", "rev-parse", "HEAD"], cwd=self.config.fw_root).strip()
 
 
     def __get_git_repository_url(self):
     def __get_git_repository_url(self):
         '''
         '''
@@ -455,7 +451,7 @@ class Results:
         '''
         '''
         return subprocess.check_output(
         return subprocess.check_output(
             ["git", "config", "--get", "remote.origin.url"],
             ["git", "config", "--get", "remote.origin.url"],
-            cwd=self.config.fwroot).strip()
+            cwd=self.config.fw_root).strip()
 
 
     def __get_git_branch_name(self):
     def __get_git_branch_name(self):
         '''
         '''
@@ -464,12 +460,12 @@ class Results:
         return subprocess.check_output(
         return subprocess.check_output(
             'git rev-parse --abbrev-ref HEAD',
             'git rev-parse --abbrev-ref HEAD',
             shell=True,
             shell=True,
-            cwd=self.config.fwroot).strip()
+            cwd=self.config.fw_root).strip()
 
 
     def __parse_stats(self, framework_test, test_type, start_time, end_time,
     def __parse_stats(self, framework_test, test_type, start_time, end_time,
                       interval):
                       interval):
         '''
         '''
-        For each test type, process all the statistics, and return a multi-layered 
+        For each test type, process all the statistics, and return a multi-layered
         dictionary that has a structure as follows:
         dictionary that has a structure as follows:
 
 
         (timestamp)
         (timestamp)
@@ -512,18 +508,18 @@ class Results:
 
 
     def __calculate_average_stats(self, raw_stats):
     def __calculate_average_stats(self, raw_stats):
         '''
         '''
-        We have a large amount of raw data for the statistics that may be useful 
-        for the stats nerds, but most people care about a couple of numbers. For 
+        We have a large amount of raw data for the statistics that may be useful
+        for the stats nerds, but most people care about a couple of numbers. For
         now, we're only going to supply:
         now, we're only going to supply:
           * Average CPU
           * Average CPU
           * Average Memory
           * Average Memory
           * Total network use
           * Total network use
           * Total disk use
           * Total disk use
         More may be added in the future. If they are, please update the above list.
         More may be added in the future. If they are, please update the above list.
-        
+
         Note: raw_stats is directly from the __parse_stats method.
         Note: raw_stats is directly from the __parse_stats method.
-        
-        Recall that this consists of a dictionary of timestamps, each of which 
+
+        Recall that this consists of a dictionary of timestamps, each of which
         contain a dictionary of stat categories which contain a dictionary of stats
         contain a dictionary of stat categories which contain a dictionary of stats
         '''
         '''
         raw_stat_collection = dict()
         raw_stat_collection = dict()

+ 47 - 15
toolset/utils/scaffolding.py

@@ -1,11 +1,10 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
 import os, re
 import os, re
 from shutil import copytree
 from shutil import copytree
-from toolset.utils.metadata_helper import gather_frameworks, gather_langauges
-
+from toolset.utils.metadata import Metadata
 
 
 class Scaffolding:
 class Scaffolding:
-    def __init__(self, benchmarker_config):
+    def __init__(self, benchmarker):
         print("""
         print("""
 -------------------------------------------------------------------------------
 -------------------------------------------------------------------------------
     This wizard is intended to help build the scaffolding required for a new 
     This wizard is intended to help build the scaffolding required for a new 
@@ -16,13 +15,15 @@ class Scaffolding:
 -------------------------------------------------------------------------------"""
 -------------------------------------------------------------------------------"""
               )
               )
 
 
-        self.benchmarker_config = benchmarker_config
+        self.benchmarker = benchmarker
+        self.benchmarker_config = benchmarker.config
 
 
         try:
         try:
             self.__gather_display_name()
             self.__gather_display_name()
             self.__gather_language()
             self.__gather_language()
             self.__gather_approach()
             self.__gather_approach()
             self.__gather_classification()
             self.__gather_classification()
+            self.__gather_database()
             self.__gather_orm()
             self.__gather_orm()
             self.__gather_webserver()
             self.__gather_webserver()
             self.__gather_versus()
             self.__gather_versus()
@@ -46,7 +47,7 @@ class Scaffolding:
         self.display_name = raw_input("Name: ").strip()
         self.display_name = raw_input("Name: ").strip()
 
 
         found = False
         found = False
-        for framework in gather_frameworks(config=self.benchmarker_config):
+        for framework in self.benchmarker.metadata.gather_frameworks():
             if framework.lower() == self.display_name.lower():
             if framework.lower() == self.display_name.lower():
                 found = True
                 found = True
 
 
@@ -70,7 +71,7 @@ class Scaffolding:
     def __prompt_language(self):
     def __prompt_language(self):
         self.language = raw_input("Language: ").strip()
         self.language = raw_input("Language: ").strip()
 
 
-        known_languages = gather_langauges(benchmarker_config)
+        known_languages = self.benchmarker.metadata.gather_languages()
         language = None
         language = None
         for lang in known_languages:
         for lang in known_languages:
             if lang.lower() == self.language.lower():
             if lang.lower() == self.language.lower():
@@ -93,9 +94,9 @@ class Scaffolding:
       
       
   Did you mean to add the new language, '%s', to the benchmark suite?
   Did you mean to add the new language, '%s', to the benchmark suite?
       """ % (similar, self.language))
       """ % (similar, self.language))
-            valid = self.__prompt_confirm_new_language(known_languages)
+            valid = self.__prompt_confirm_new_language()
             while not valid:
             while not valid:
-                valid = self.__prompt_confirm_new_language(known_languages)
+                valid = self.__prompt_confirm_new_language()
 
 
             if self.confirm_new_lang == 'n':
             if self.confirm_new_lang == 'n':
                 self.language = None
                 self.language = None
@@ -104,7 +105,7 @@ class Scaffolding:
 
 
         return self.language
         return self.language
 
 
-    def __prompt_confirm_new_language(self, known_languages):
+    def __prompt_confirm_new_language(self):
         self.confirm_new_lang = raw_input("Create New Language '%s' (y/n): " %
         self.confirm_new_lang = raw_input("Create New Language '%s' (y/n): " %
                                           self.language).strip().lower()
                                           self.language).strip().lower()
         return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
         return self.confirm_new_lang == 'y' or self.confirm_new_lang == 'n'
@@ -195,7 +196,38 @@ class Scaffolding:
         if self.platform == '':
         if self.platform == '':
             self.platform = 'None'
             self.platform = 'None'
 
 
+    def __gather_database(self):
+        print("""
+  Which database will you be using for your test?
+    """)
+        i = 1
+        prompt = "Database ["
+        options = []
+        for db in Metadata.supported_dbs:
+            print("  {!s}) {!s}: {!s}".format(i, db[0], db[1]))
+            prompt += "{!s}/".format(i)
+            options.append(db[0])
+            i += 1
+        print("  {!s}) None: No database at this time{!s}".format(i, os.linesep))
+        prompt += "{!s}]: ".format(i)
+        options.append("None")
+        valid = self.__prompt_database(prompt, options)
+        while not valid:
+            valid = self.__prompt_database(prompt, options)
+
+    def __prompt_database(self, prompt, options):
+        self.database = raw_input(prompt).strip()
+        if 0 < int(self.database) <= len(options):
+            self.database = options[int(self.database) - 1]
+            return True
+        else:
+            return False
+
     def __gather_orm(self):
     def __gather_orm(self):
+        if self.database == 'None':
+            self.orm = 'None'
+            return
+
         print("""
         print("""
   How you would classify the ORM (object relational mapper) of your test?
   How you would classify the ORM (object relational mapper) of your test?
 
 
@@ -294,8 +326,8 @@ class Scaffolding:
             self.__edit_scaffold_files()
             self.__edit_scaffold_files()
 
 
     def __create_test_folder(self):
     def __create_test_folder(self):
-        self.language_dir = os.path.join(self.benchmarker_config.fwroot,
-                                         "frameworks", self.language)
+        self.language_dir = os.path.join(self.benchmarker_config.lang_root,
+                                         self.language)
         self.test_dir = os.path.join(self.language_dir, self.name)
         self.test_dir = os.path.join(self.language_dir, self.name)
 
 
         if os.path.exists(self.test_dir):
         if os.path.exists(self.test_dir):
@@ -305,9 +337,7 @@ class Scaffolding:
         return True
         return True
 
 
     def __copy_scaffold_files(self):
     def __copy_scaffold_files(self):
-        self.scaffold_dir = os.path.join(self.benchmarker_config.fwroot,
-                                         "toolset", "scaffolding")
-        copytree(self.scaffold_dir, self.test_dir)
+        copytree(self.benchmarker_config.scaffold_root, self.test_dir)
 
 
     def __edit_scaffold_files(self):
     def __edit_scaffold_files(self):
         for file in os.listdir(os.path.join(self.test_dir)):
         for file in os.listdir(os.path.join(self.test_dir)):
@@ -326,6 +356,8 @@ class Scaffolding:
                 self.framework)
                 self.framework)
             self.__replace_text(
             self.__replace_text(
                 os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
                 os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
+            self.__replace_text(
+                os.path.join(self.test_dir, file), "\$DATABASE", self.database)
             self.__replace_text(
             self.__replace_text(
                 os.path.join(self.test_dir, file), "\$ORM", self.orm)
                 os.path.join(self.test_dir, file), "\$ORM", self.orm)
             self.__replace_text(
             self.__replace_text(
@@ -362,4 +394,4 @@ class Scaffolding:
             contents = conf.read()
             contents = conf.read()
         replaced_text = re.sub(to_replace, replacement, contents)
         replaced_text = re.sub(to_replace, replacement, contents)
         with open(file, "w") as f:
         with open(file, "w") as f:
-            f.write(replaced_text)
+            f.write(replaced_text)
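Editor's note: one caveat on the new database prompt added above — __prompt_database passes the raw answer straight to int(), so a non-numeric reply raises ValueError instead of looping back to the prompt. A hardened variant, offered only as a sketch (not part of this commit):

# Hypothetical defensive version: any non-numeric or out-of-range answer
# returns False so the caller simply re-prompts.
def prompt_database(self, prompt, options):
    answer = raw_input(prompt).strip()
    if not answer.isdigit():
        return False
    choice = int(answer)
    if 0 < choice <= len(options):
        self.database = options[choice - 1]
        return True
    return False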

+ 100 - 0
toolset/utils/time_logger.py

@@ -0,0 +1,100 @@
+import time
+from colorama import Fore, Style
+
+from toolset.utils.output_helper import log
+
+class TimeLogger:
+    '''
+    Class for keeping track of and logging execution times
+    for suite actions
+    '''
+
+    def __init__(self):
+        self.start = time.time()
+
+        self.benchmarking_start = 0
+        self.benchmarking_total = 0
+        self.build_start = 0
+        self.build_total = 0
+        self.test_start = 0
+        self.test_total = 0
+        self.verify_start = 0
+        self.verify_total = 0
+
+    @staticmethod
+    def output(sec):
+        output = ""
+        h = sec // 3600
+        m = (sec // 60) % 60
+        s = sec % 60
+        if h > 0:
+            output = "%sh" % h
+        if m > 0:
+            output = output + "%sm " % m
+        output = output + "%ss" % s
+        return output
+
+    def log_benchmarking_start(self):
+        self.benchmarking_start = time.time()
+
+    def log_benchmarking_end(self, log_prefix, file):
+        total = int(time.time() - self.benchmarking_start)
+        self.benchmarking_total = self.benchmarking_total + total
+        log("Total benchmarking time: %s" % TimeLogger.output(total),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
+
+    def log_build_start(self):
+        self.build_start = time.time()
+
+    def log_build_end(self, log_prefix, file):
+        total = int(time.time() - self.build_start)
+        self.build_total = self.build_total + total
+        log("Total build time: %s" % TimeLogger.output(total),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
+
+    def log_test_start(self):
+        self.test_start = time.time()
+
+    def log_test_end(self, log_prefix, file):
+        total = int(time.time() - self.test_start)
+        log("Total test time: %s" % TimeLogger.output(total),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
+        log("Total time building so far: %s"
+            % TimeLogger.output(self.build_total),
+            prefix="tfb: ",
+            file=file,
+            color=Fore.YELLOW)
+        log("Total time verifying so far: %s"
+            % TimeLogger.output(self.verify_total),
+            prefix="tfb: ",
+            file=file,
+            color=Fore.YELLOW)
+        if self.benchmarking_total > 0:
+            log("Total time benchmarking so far: %s"
+                % TimeLogger.output(self.benchmarking_total),
+                prefix="tfb: ",
+                file=file,
+                color=Fore.YELLOW)
+        running_time = int(time.time() - self.start)
+        log("Total execution time so far: %s"
+            % TimeLogger.output(running_time),
+            prefix="tfb: ",
+            file=file,
+            color=Fore.YELLOW)
+
+    def log_verify_start(self):
+        self.verify_start = time.time()
+
+    def log_verify_end(self, log_prefix, file):
+        total = int(time.time() - self.verify_start)
+        self.verify_total = self.verify_total + total
+        log("Total verify time: %s" % TimeLogger.output(total),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
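Editor's note: usage of the new class is symmetric — each suite phase is bracketed by a log_*_start / log_*_end pair, and the *_end call logs the elapsed time (plus running totals in log_test_end). A minimal sketch, assuming output_helper.log accepts any writable file object; the prefix and filename below are placeholders:

from toolset.utils.time_logger import TimeLogger

timer = TimeLogger()
with open("example.log", "w") as f:
    timer.log_build_start()
    # ... build work happens here ...
    timer.log_build_end(log_prefix="example: ", file=f)

    timer.log_verify_start()
    # ... verification happens here ...
    timer.log_verify_end(log_prefix="example: ", file=f)

Note that output() omits the space after the hour component, so TimeLogger.output(3725) renders as "1h2m 5s".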

+ 0 - 16
toolset/utils/unbuffered.py

@@ -1,16 +0,0 @@
-# Wrapper for unbuffered stream writing.
-# http://stackoverflow.com/a/107717/376366
-# Used to make sure print output appears in the correct order
-# in log files when spawning subprocesses.
-
-
-class Unbuffered:
-    def __init__(self, stream):
-        self.stream = stream
-
-    def write(self, data):
-        self.stream.write(data)
-        self.stream.flush()
-
-    def __getattr__(self, attr):
-        return getattr(self.stream, attr)
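Editor's note: the deleted class is the classic stdout-flushing shim. Reproduced here only to show how such a wrapper is typically installed (a reconstruction for illustration, not code remaining in this repository):

import sys

class Unbuffered(object):
    # the wrapper removed above, reproduced for the example
    def __init__(self, stream):
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)

sys.stdout = Unbuffered(sys.stdout)   # every write() is followed by a flush()
print("appears immediately, even when piped to a log file")

The same effect can be had without the wrapper by running the interpreter with python -u or, in Python 3, by passing flush=True to print.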