
[WIP] Centralize logging (#3416)

Mike Smith 7 years ago
parent
commit
4367b865c8
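Note: the new toolset/utils/output_helper module that this commit leans on (FNULL, log, log_error, header) is not itself visible in the diff below. The sketch that follows is only an illustration of the shape those helpers appear to have, inferred from the call sites in the changed files; the exact names of parameters, the defaults, and the banner width are assumptions, not the committed implementation. Since the change is marked [WIP], the call sites are not yet fully consistent either (log is sometimes handed a file handle as its second positional argument).

# Hypothetical sketch of toolset/utils/output_helper.py, inferred from usage in this diff.
import os
import sys

# Shared sink for subprocess output that should be discarded, replacing the
# ad-hoc open(os.devnull, 'w') handles scattered through the old code.
FNULL = open(os.devnull, 'w')

def log(message, prefix='', log_file=None):
    '''
    Writes each line of the message to stdout with an optional per-test
    prefix, and tees the same lines into log_file when one is given.
    '''
    for line in str(message).splitlines():
        sys.stdout.write(prefix + line + os.linesep)
    sys.stdout.flush()
    if log_file is not None:
        for line in str(message).splitlines():
            log_file.write(prefix + line + os.linesep)
        log_file.flush()

def log_error(message, prefix='', log_file=None):
    '''Same sink as log(); intended for tracebacks and error text.'''
    log(message, prefix, log_file)

def header(message, prefix='', log_file=None, top='-', bottom='-'):
    '''Logs a message framed by banner lines built from the top/bottom characters.'''
    log(top * 80, prefix, log_file)
    log(message, prefix, log_file)
    log(bottom * 80, prefix, log_file)

Under those assumptions, a call such as log("Verifying framework URLs", log_prefix) would print only the prefixed console line, while log(..., log_prefix, benchmark_log) would also append the same text to the per-run benchmark.log opened in Benchmarker.run().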

+ 213 - 254
toolset/benchmark/benchmarker.py

@@ -1,12 +1,10 @@
-from toolset.utils.output_helper import header, tee_output
+from toolset.utils.output_helper import header, log, log_error, FNULL
 from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
 from toolset.utils import docker_helper
 
 import os
 import subprocess
 import traceback
-import sys
-import logging
 import socket
 import time
 import json
@@ -40,91 +38,40 @@ class Benchmarker:
         all_tests = gather_remaining_tests(self.config, self.results)
 
         # Setup client/server
-        print(
-            header(
-                "Preparing Server, Database, and Client ...",
-                top='=',
-                bottom='='))
+        header(
+            "Preparing Server, Database, and Client ...", top='=', bottom='=')
         with self.config.quiet_out.enable():
             self.__setup_server()
             self.__setup_database()
             self.__setup_client()
 
         # Run tests
-        print(header("Running Tests...", top='=', bottom='='))
-        self.__run_tests(all_tests)
+        success = True
+        header("Running Tests...", top='=', bottom='=')
+        with open(os.path.join(self.results.directory, 'benchmark.log'),
+                  'w') as benchmark_log:
+            for test in all_tests:
+                header("Running Test: %s" % test.name)
+                with self.config.quiet_out.enable():
+                    success = self.__run_test(test, benchmark_log) and success
+                # Load intermediate result from child process
+                self.results.load()
 
         # Parse results
         if self.config.mode == "benchmark":
-            print(header("Parsing Results ...", top='=', bottom='='))
+            header("Parsing Results ...", top='=', bottom='=')
             self.results.parse(all_tests)
 
         self.results.set_completion_time()
         self.results.upload()
         self.results.finish()
 
+        return success
+
     ##########################################################################################
     # Private methods
     ##########################################################################################
 
-    def __benchmark(self, framework_test, logPath):
-        '''
-        Runs the benchmark for each type of test that it implements
-        '''
-
-        def benchmark_type(test_type):
-            benchmarkPath = os.path.join(logPath, test_type)
-            try:
-                os.makedirs(benchmarkPath)
-            except OSError:
-                pass
-            with open(os.path.join(benchmarkPath, 'benchmark.txt'),
-                      'w') as out:
-                out.write("BENCHMARKING %s ... " % test_type.upper())
-
-                test = framework_test.runTests[test_type]
-                test.setup_out(out)
-                raw_file = self.results.get_raw_file(framework_test.name,
-                                                     test_type)
-                if not os.path.exists(raw_file):
-                    # Open to create the empty file
-                    with open(raw_file, 'w'):
-                        pass
-
-                if not test.failed:
-                    remote_script = self.config.types[
-                        test_type].get_remote_script(self.config, test.name,
-                                                     test.get_url(),
-                                                     framework_test.port)
-
-                    # Begin resource usage metrics collection
-                    self.__begin_logging(framework_test, test_type)
-
-                    # Run the benchmark
-                    with open(raw_file, 'w') as raw_file:
-                        p = subprocess.Popen(
-                            self.config.client_ssh_command,
-                            stdin=subprocess.PIPE,
-                            stdout=raw_file,
-                            stderr=raw_file)
-                        p.communicate(remote_script)
-                        out.flush()
-
-                    # End resource usage metrics collection
-                    self.__end_logging()
-
-                results = self.results.parse_test(framework_test, test_type)
-                print("Benchmark results:")
-                pprint(results)
-
-                self.results.report_benchmark_results(
-                    framework_test, test_type, results['results'])
-                out.write("Complete\n")
-                out.flush()
-
-        for test_type in framework_test.runTests:
-            benchmark_type(test_type)
-
     def __run_list_test_metadata(self):
         '''
         Prints the metadata for all the available tests
@@ -152,31 +99,6 @@ class Benchmarker:
                 "w") as f:
             f.write(all_tests_json)
 
-    def __begin_logging(self, framework_test, test_type):
-        '''
-        Starts a thread to monitor the resource usage, to be synced with the 
-        client's time.
-        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
-        '''
-        output_file = "{file_name}".format(
-            file_name=self.results.get_stats_file(framework_test.name,
-                                                  test_type))
-        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
-                                      --raw --socket --tcp --udp --unix --vm --disk-util \
-                                      --rpc --rpcd --output {output_file}".format(
-            output_file=output_file)
-        cmd = shlex.split(dstat_string)
-        dev_null = open(os.devnull, "w")
-        self.subprocess_handle = subprocess.Popen(
-            cmd, stdout=dev_null, stderr=subprocess.STDOUT)
-
-    def __end_logging(self):
-        '''
-        Stops the logger thread and blocks until shutdown is complete.
-        '''
-        self.subprocess_handle.terminate()
-        self.subprocess_handle.communicate()
-
     def __setup_server(self):
         '''
         Makes any necessary changes to the server that should be
@@ -187,31 +109,29 @@ class Benchmarker:
         http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
         '''
         try:
-            if os.name == 'nt':
-                return True
             subprocess.call(
                 ['sudo', 'sysctl', '-w', 'net.ipv4.tcp_max_syn_backlog=65535'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
             subprocess.call(
                 ['sudo', 'sysctl', '-w', 'net.core.somaxconn=65535'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
             subprocess.call(
                 ['sudo', 'sysctl', 'net.ipv4.tcp_tw_reuse=1'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
             subprocess.call(
                 ['sudo', 'sysctl', 'net.ipv4.tcp_tw_recycle=1'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
             subprocess.call(
                 ['sudo', 'sysctl', '-w', 'kernel.shmmax=134217728'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
             subprocess.call(
                 ['sudo', 'sysctl', '-w', 'kernel.shmall=2097152'],
-                stdout=self.config.quiet_out,
+                stdout=FNULL,
                 stderr=subprocess.STDOUT)
 
             with open(os.path.join(self.results.directory, 'sysctl.txt'),
@@ -247,7 +167,7 @@ class Benchmarker:
             sudo sysctl -w kernel.sem="250 32000 256 512"
             """
         ])
-        subprocess.check_call(command)
+        subprocess.check_call(command, stdout=FNULL, stderr=subprocess.STDOUT)
         # TODO - print kernel configuration to file
         # echo "Printing kernel configuration:" && sudo sysctl -a
 
@@ -269,170 +189,209 @@ class Benchmarker:
             sudo sysctl -w kernel.shmall=2097152
             """
         ])
+        subprocess.check_call(command, stdout=FNULL, stderr=subprocess.STDOUT)
 
-    def __run_tests(self, tests):
+    def __run_test(self, test, benchmark_log):
         '''
-        Calls each test passed in tests to __run_test in a separate process. 
-        Each test is given a set amount of time and if kills the child process 
-        (and subsequently all of its child processes).
+        Runs the given test, verifies that the webapp is accepting requests,
+        optionally benchmarks the webapp, and ultimately stops all services
+        started for this test.
         '''
-        if len(tests) == 0:
-            return 0
+        log_prefix = "%s: " % test.name
 
-        logging.debug("Start __run_tests.")
-        logging.debug("__name__ = %s", __name__)
-
-        # These features do not work on Windows
-        for test in tests:
-            print(header("Running Test: %s" % test.name))
-            with self.config.quiet_out.enable():
-                self.__run_test(test)
-            # Load intermediate result from child process
-            self.results.load()
+        if test.os.lower() != self.config.os.lower() or test.database_os.lower(
+        ) != self.config.database_os.lower():
+            log("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.",
+                log_prefix, benchmark_log)
+            return False
 
-        logging.debug("End __run_tests.")
+        # If the test is in the excludes list, we skip it
+        if self.config.exclude != None and test.name in self.config.exclude:
+            log("Test {name} has been added to the excludes list. Skipping.".
+                format(name=test.name),
+                log_prefix,
+                benchmark_log)
+            return False
 
-    def __run_test(self, test):
-        '''
-        Ensures that the system has all necessary software to run the tests. 
-        This does not include that software for the individual test, but covers 
-        software such as curl and weighttp that are needed.
-        '''
-        logDir = os.path.join(self.results.directory, test.name.lower())
+        database_container_id = None
         try:
-            os.makedirs(logDir)
-        except Exception:
-            pass
-        with open(os.path.join(logDir, 'out.txt'), 'w') as out:
-
-            if test.os.lower() != self.config.os.lower(
-            ) or test.database_os.lower() != self.config.database_os.lower():
-                out.write(
-                    "OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n"
-                )
-                return
-
-            # If the test is in the excludes list, we skip it
-            if self.config.exclude != None and test.name in self.config.exclude:
-                out.write(
-                    "Test {name} has been added to the excludes list. Skipping.\n".
-                    format(name=test.name))
-                return
-
-            database_container_id = None
-            try:
-                if self.__is_port_bound(test.port):
-                    time.sleep(60)
-
-                if self.__is_port_bound(test.port):
-                    # We gave it our all
-                    self.results.write_intermediate(test.name, "port " + str(
-                        test.port) + " is not available before start")
-                    out.write(
-                        header(
-                            "Error: Port %s is not available, cannot start %s"
-                            % (test.port, test.name)))
-                    out.flush()
-                    print("Error: Unable to recover port, cannot start test")
-                    return
-
-                # Start database container
-                if test.database.lower() != "none":
-                    database_container_id = docker_helper.start_database(
-                        self.config, test.database.lower())
-                    if not database_container_id:
-                        out.write(
-                            "ERROR: Problem building/running database container"
-                        )
-                        out.flush()
-                        self.results.write_intermediate(
-                            test.name, "ERROR: Problem starting")
-                        return
-
-                # Start webapp
-                result = test.start(out, database_container_id)
-                if result != 0:
-                    docker_helper.stop(self.config, database_container_id,
-                                       test)
-                    out.write("ERROR: Problem starting {name}\n".format(
-                        name=test.name))
-                    out.flush()
+            if self.__is_port_bound(test.port):
+                time.sleep(60)
+
+            if self.__is_port_bound(test.port):
+                # We gave it our all
+                self.results.write_intermediate(test.name, "port " + str(
+                    test.port) + " is not available before start")
+                header("Error: Port %s is not available, cannot start %s" %
+                       (test.port, test.name), log_prefix, benchmark_log)
+                return False
+
+            # Start database container
+            if test.database.lower() != "none":
+                database_container_id = docker_helper.start_database(
+                    self.config, test.database.lower())
+                if not database_container_id:
                     self.results.write_intermediate(test.name,
                                                     "ERROR: Problem starting")
-                    return
-
-                slept = 0
-                max_sleep = 60
-                while not test.is_running() and slept < max_sleep:
-                    if not docker_helper.successfully_running_containers(
-                            test.get_docker_files(), out):
-                        docker_helper.stop(self.config, database_container_id,
-                                           test)
-                        tee_output(
-                            out,
-                            "ERROR: One or more expected docker container exited early"
-                            + os.linesep)
-                        return
-                    time.sleep(1)
-                    slept += 1
-
-                # Debug mode blocks execution here until ctrl+c
-                if self.config.mode == "debug":
-                    logging.info(
-                        "Entering debug mode. Server has started. CTRL-c to stop."
-                    )
-                    while True:
-                        time.sleep(1)
-
-                # Verify URLs
-                logging.info("Verifying framework URLs")
-                passed_verify = test.verify_urls(logDir)
-
-                # Benchmark this test
-                if self.config.mode == "benchmark":
-                    logging.info("Benchmarking")
-                    out.write(header("Benchmarking %s" % test.name))
-                    out.flush()
-                    self.__benchmark(test, logDir)
-
-                # Stop this test
-                docker_helper.stop(self.config, database_container_id, test)
+                    log("ERROR: Problem building/running database container",
+                        log_prefix, benchmark_log)
+                    return False
 
-                # Remove contents of  /tmp folder
-                try:
-                    subprocess.check_call(
-                        'sudo rm -rf /tmp/*',
-                        shell=True,
-                        stderr=out,
-                        stdout=out)
-                except Exception:
-                    out.write(header("Error: Could not empty /tmp"))
-
-                # Save results thus far into the latest results directory
+            # Start webapp
+            result = test.start(database_container_id)
+            if result != 0:
+                docker_helper.stop(self.config, database_container_id, test)
                 self.results.write_intermediate(test.name,
-                                                time.strftime(
-                                                    "%Y%m%d%H%M%S",
-                                                    time.localtime()))
+                                                "ERROR: Problem starting")
+                log("ERROR: Problem starting {name}".format(name=test.name),
+                    log_prefix,
+                    benchmark_log)
+                return False
+
+            slept = 0
+            max_sleep = 60
+            while not test.is_running() and slept < max_sleep:
+                if not docker_helper.successfully_running_containers(
+                        test.get_docker_files(), benchmark_log):
+                    docker_helper.stop(self.config, database_container_id,
+                                       test)
+                    log("ERROR: One or more expected docker container exited early",
+                        log_prefix, benchmark_log)
+                    return False
+                time.sleep(1)
+                slept += 1
+
+            # Debug mode blocks execution here until ctrl+c
+            if self.config.mode == "debug":
+                log("Entering debug mode. Server has started. CTRL-c to stop.",
+                    log_prefix, benchmark_log)
+                while True:
+                    time.sleep(1)
 
-                # Upload the results thus far to another server (optional)
-                self.results.upload()
+            # Verify URLs
+            log("Verifying framework URLs", log_prefix)
+            passed_verify = test.verify_urls()
 
-                if self.config.mode == "verify" and not passed_verify:
-                    print("Failed verify!")
-                    return
-            except KeyboardInterrupt:
-                docker_helper.stop(self.config, database_container_id, test)
-            except (OSError, IOError, subprocess.CalledProcessError) as e:
-                traceback.print_exc()
-                self.results.write_intermediate(
-                    test.name, "error during test setup: " + str(e))
-                out.write(header("Subprocess Error %s" % test.name))
-                traceback.print_exc(file=out)
-                out.flush()
-                out.close()
-                return
-
-            out.close()
+            # Benchmark this test
+            if self.config.mode == "benchmark":
+                header(
+                    message="Benchmarking %s" % test.name,
+                    log_file=benchmark_log)
+                self.__benchmark(test, benchmark_log)
+
+            # Stop this test
+            docker_helper.stop(self.config, database_container_id, test)
+
+            # Remove contents of  /tmp folder
+            try:
+                subprocess.check_call(
+                    'sudo rm -rf /tmp/*',
+                    shell=True,
+                    stderr=benchmark_log,
+                    stdout=benchmark_log)
+            except Exception:
+                header(
+                    message="Error: Could not empty /tmp",
+                    log_file=benchmark_log)
+
+            # Save results thus far into the latest results directory
+            self.results.write_intermediate(test.name,
+                                            time.strftime(
+                                                "%Y%m%d%H%M%S",
+                                                time.localtime()))
+
+            # Upload the results thus far to another server (optional)
+            self.results.upload()
+
+            if self.config.mode == "verify" and not passed_verify:
+                log("Failed verify!", log_prefix, benchmark_log)
+                return False
+        except KeyboardInterrupt:
+            docker_helper.stop(self.config, database_container_id, test)
+        except (OSError, IOError, subprocess.CalledProcessError) as e:
+            tb = traceback.format_exc()
+            self.results.write_intermediate(
+                test.name, "error during test setup: " + str(e))
+            header(
+                message="Subprocess Error %s" % test.name,
+                log_file=benchmark_log)
+            log_error(tb, log_prefix, benchmark_log)
+            return False
+
+        return True
+
+    def __benchmark(self, framework_test, benchmark_log):
+        '''
+        Runs the benchmark for each type of test that it implements
+        '''
+
+        def benchmark_type(test_type):
+            log("BENCHMARKING %s ... " % test_type.upper(), benchmark_log)
+
+            test = framework_test.runTests[test_type]
+            test.setup_out(benchmark_log)
+            raw_file = self.results.get_raw_file(framework_test.name,
+                                                 test_type)
+            if not os.path.exists(raw_file):
+                # Open to create the empty file
+                with open(raw_file, 'w'):
+                    pass
+
+            if not test.failed:
+                remote_script = self.config.types[test_type].get_remote_script(
+                    self.config, test.name, test.get_url(),
+                    framework_test.port)
+
+                # Begin resource usage metrics collection
+                self.__begin_logging(framework_test, test_type)
+
+                # Run the benchmark
+                with open(raw_file, 'w') as raw_file:
+                    p = subprocess.Popen(
+                        self.config.client_ssh_command,
+                        stdin=subprocess.PIPE,
+                        stdout=raw_file,
+                        stderr=raw_file)
+                    p.communicate(remote_script)
+
+                # End resource usage metrics collection
+                self.__end_logging()
+
+            results = self.results.parse_test(framework_test, test_type)
+            log("Benchmark results:", benchmark_log)
+            # TODO move into log somehow
+            pprint(results)
+
+            self.results.report_benchmark_results(framework_test, test_type,
+                                                  results['results'])
+            log("Complete", benchmark_log)
+
+        for test_type in framework_test.runTests:
+            benchmark_type(test_type)
+
+    def __begin_logging(self, framework_test, test_type):
+        '''
+        Starts a thread to monitor the resource usage, to be synced with the 
+        client's time.
+        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
+        '''
+        output_file = "{file_name}".format(
+            file_name=self.results.get_stats_file(framework_test.name,
+                                                  test_type))
+        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
+                                      --raw --socket --tcp --udp --unix --vm --disk-util \
+                                      --rpc --rpcd --output {output_file}".format(
+            output_file=output_file)
+        cmd = shlex.split(dstat_string)
+        self.subprocess_handle = subprocess.Popen(
+            cmd, stdout=FNULL, stderr=subprocess.STDOUT)
+
+    def __end_logging(self):
+        '''
+        Stops the logger thread and blocks until shutdown is complete.
+        '''
+        self.subprocess_handle.terminate()
+        self.subprocess_handle.communicate()
 
     def __is_port_bound(self, port):
         '''

+ 14 - 11
toolset/benchmark/fortune_html_parser.py

@@ -1,7 +1,11 @@
 # -*- coding: utf-8
+import os
+
 from HTMLParser import HTMLParser
 from difflib import unified_diff
 
+from toolset.utils.output_helper import log
+
 
 class FortuneHTMLParser(HTMLParser):
 
@@ -111,7 +115,7 @@ class FortuneHTMLParser(HTMLParser):
 
         # Append a newline after the <table> and <html>
         if tag.lower() == 'table' or tag.lower() == 'html':
-            self.body.append("\n")
+            self.body.append(os.linesep)
 
     def handle_data(self, data):
         '''
@@ -154,9 +158,9 @@ class FortuneHTMLParser(HTMLParser):
 
         # Append a newline after each </tr> and </head>
         if tag.lower() == 'tr' or tag.lower() == 'head':
-            self.body.append("\n")
+            self.body.append(os.linesep)
 
-    def isValidFortune(self, out):
+    def isValidFortune(self, name, out):
         '''
         Returns whether the HTML input parsed by this parser
         is valid against our known "fortune" spec.
@@ -167,13 +171,13 @@ class FortuneHTMLParser(HTMLParser):
         same = self.valid_fortune == body
         diff_lines = []
         if not same:
-            output = "Oh no! I compared {!s}\n\n\nto.....{!s}\n".format(
-                self.valid_fortune, body)
-            output += "Fortune invalid. Diff following:\n"
+            output = "Oh no! I compared {!s}".format(self.valid_fortune)
+            output += os.linesep + os.linesep + "to" + os.linesep + os.linesep + body + os.linesep
+            output += "Fortune invalid. Diff following:" + os.linesep
             headers_left = 3
             for line in unified_diff(
-                    self.valid_fortune.split('\n'),
-                    body.split('\n'),
+                    self.valid_fortune.split(os.linesep),
+                    body.split(os.linesep),
                     fromfile='Valid',
                     tofile='Response',
                     n=0):
@@ -181,7 +185,6 @@ class FortuneHTMLParser(HTMLParser):
                 output += line
                 headers_left -= 1
                 if headers_left <= 0:
-                    output += "\n"
-            print(output)
-            out.write(output)
+                    output += os.linesep
+            log(output, "%s: " % name)
         return (same, diff_lines)

+ 34 - 34
toolset/benchmark/framework_test.py

@@ -1,11 +1,9 @@
 import os
 import subprocess
-import sys
 import traceback
-import logging
 from requests import ConnectionError
 
-from toolset.utils.output_helper import header
+from toolset.utils.output_helper import header, log, FNULL
 from toolset.utils import docker_helper
 
 # Cross-platform colored text
@@ -40,9 +38,6 @@ class FrameworkTest:
         self.versus = ""
         self.docker_files = None
 
-        # setup logging
-        logging.basicConfig(stream=sys.stderr, level=logging.INFO)
-
         # Used in setup.sh scripts for consistency with
         # the bash environment variables
         self.troot = self.directory
@@ -53,18 +48,31 @@ class FrameworkTest:
     # Public Methods
     ##########################################################################################
 
-    def start(self, out, database_container_id):
+    def start(self, database_container_id):
         '''
         Start the test implementation
         '''
         test_docker_files = self.get_docker_files()
+        test_log_dir = os.path.join(self.results.directory, self.name.lower())
+        build_log_dir = os.path.join(test_log_dir, 'build')
+        run_log_dir = os.path.join(test_log_dir, 'run')
 
-        result = docker_helper.build(self.benchmarker_config, [self.name], out)
+        try:
+            os.makedirs(build_log_dir)
+        except OSError:
+            pass
+        try:
+            os.makedirs(run_log_dir)
+        except OSError:
+            pass
+
+        result = docker_helper.build(self.benchmarker_config, [self.name],
+                                     build_log_dir)
         if result != 0:
             return result
 
         return docker_helper.run(self.benchmarker_config, test_docker_files,
-                                 out)
+                                 run_log_dir)
 
     def is_running(self):
         '''
@@ -81,7 +89,6 @@ class FrameworkTest:
                                    self.runTests[test_type].get_url())
 
         try:
-            FNULL = open(os.devnull, 'w')
             subprocess.check_call(
                 ['curl', '-sSfl', url], stdout=FNULL, stderr=subprocess.STDOUT)
         except:
@@ -103,17 +110,18 @@ class FrameworkTest:
 
         return test_docker_files
 
-    def verify_urls(self, logPath):
+    def verify_urls(self):
         '''
         Verifys each of the URLs for this test. This will simply curl the URL and 
         check for it's return status. For each url, a flag will be set on this 
         object for whether or not it passed.
         Returns True if all verifications succeeded
         '''
+        log_path = os.path.join(self.results.directory, self.name.lower())
         result = True
 
         def verify_type(test_type):
-            verificationPath = os.path.join(logPath, test_type)
+            verificationPath = os.path.join(log_path, test_type)
             try:
                 os.makedirs(verificationPath)
             except OSError:
@@ -121,8 +129,9 @@ class FrameworkTest:
             with open(os.path.join(verificationPath, 'verification.txt'),
                       'w') as verification:
                 test = self.runTests[test_type]
-                test.setup_out(verification)
-                verification.write(header("VERIFYING %s" % test_type.upper()))
+                header(
+                    message="VERIFYING %s" % test_type.upper(),
+                    log_file=verification)
 
                 base_url = "http://%s:%s" % (
                     self.benchmarker_config.server_host, self.port)
@@ -151,24 +160,21 @@ class FrameworkTest:
                                 'fail',
                                 "Server did not respond to request from client machine.",
                                 base_url)]
-                            logging.warning(
-                                """This error usually means your server is only accepting
+                            log("""This error usually means your server is only accepting
                 requests from localhost.""")
                 except ConnectionError as e:
                     results = [('fail', "Server did not respond to request",
                                 base_url)]
-                    logging.warning(
-                        "Verifying test %s for %s caused an exception: %s",
-                        test_type, self.name, e)
+                    log("Verifying test %s for %s caused an exception: %s" %
+                        (test_type, self.name, e))
                 except Exception as e:
                     results = [('fail', """Caused Exception in TFB
             This almost certainly means your return value is incorrect,
             but also that you have found a bug. Please submit an issue
             including this message: %s\n%s""" % (e, traceback.format_exc()),
                                 base_url)]
-                    logging.warning(
-                        "Verifying test %s for %s caused an exception: %s",
-                        test_type, self.name, e)
+                    log("Verifying test %s for %s caused an exception: %s" %
+                        (test_type, self.name, e))
                     traceback.format_exc()
 
                 test.failed = any(
@@ -186,19 +192,15 @@ class FrameworkTest:
                     elif result.upper() == "FAIL":
                         color = Fore.RED
 
-                    verification.write((
-                        "   " + color + "%s" + Style.RESET_ALL + " for %s\n") %
-                                       (result.upper(), url))
-                    print("   {!s}{!s}{!s} for {!s}\n".format(
-                        color, result.upper(), Style.RESET_ALL, url))
+                    log("   {!s}{!s}{!s} for {!s}".format(
+                        color, result.upper(), Style.RESET_ALL, url), None,
+                        verification)
                     if reason is not None and len(reason) != 0:
                         for line in reason.splitlines():
-                            verification.write("     " + line + '\n')
-                            print("     " + line)
+                            log("     " + line, None, verification)
                         if not test.passed:
-                            verification.write(
-                                "     See %s\n" % specific_rules_url)
-                            print("     See {!s}\n".format(specific_rules_url))
+                            log("     See {!s}".format(specific_rules_url),
+                                None, verification)
 
                 [output_result(r1, r2, url) for (r1, r2, url) in results]
 
@@ -212,8 +214,6 @@ class FrameworkTest:
                     raise Exception(
                         "Unknown error - test did not pass,warn,or fail")
 
-                verification.flush()
-
         result = True
         for test_type in self.runTests:
             verify_type(test_type)

+ 1 - 1
toolset/benchmark/test_types/fortune_type.py

@@ -35,7 +35,7 @@ class FortuneTestType(FrameworkTestType):
 
         parser = FortuneHTMLParser()
         parser.feed(body)
-        (valid, diff) = parser.isValidFortune(self.out)
+        (valid, diff) = parser.isValidFortune(self.name, self.out)
 
         if valid:
             problems += verify_headers(headers, url, should_be='html')

+ 18 - 18
toolset/benchmark/test_types/framework_test_type.py

@@ -5,11 +5,9 @@ import requests
 import MySQLdb
 import psycopg2
 import pymongo
+import traceback
 
-# Requests is built ontop of urllib3,
-# here we prevent general request logging
-import logging
-logging.getLogger('urllib3').setLevel(logging.CRITICAL)
+from toolset.utils.output_helper import log, log_error
 
 
 class FrameworkTestType:
@@ -87,18 +85,17 @@ class FrameworkTestType:
         Downloads a URL and returns the HTTP response headers
         and body content as a tuple
         '''
-        print("Accessing URL {!s}:".format(url))
-        self.out.write("Accessing URL %s \n" % url)
+        log("Accessing URL {!s}:".format(url))
 
         headers = {'Accept': self.accept_header}
         r = requests.get(url, timeout=15, headers=headers)
 
         headers = r.headers
         body = r.content
-        self.out.write(str(headers))
-        self.out.write(body)
+        log(str(headers))
+        log(body)
         b = 40
-        print("  Response (trimmed to {:d} bytes): \"{!s}\"".format(
+        log("  Response (trimmed to {:d} bytes): \"{!s}\"".format(
             b,
             body.strip()[:b]))
         return headers, body
@@ -168,9 +165,10 @@ class FrameworkTestType:
                 results = cursor.fetchall()
                 results_json.append(json.loads(json.dumps(dict(results))))
                 db.close()
-            except Exception as e:
-                print("ERROR: Unable to load current MySQL World table.")
-                print(e)
+            except Exception:
+                tb = traceback.format_exc()
+                log("ERROR: Unable to load current MySQL World table.")
+                log_error(tb)
         elif database_name == "postgres":
             try:
                 db = psycopg2.connect(
@@ -188,9 +186,10 @@ class FrameworkTestType:
                 results = cursor.fetchall()
                 results_json.append(json.loads(json.dumps(dict(results))))
                 db.close()
-            except Exception as e:
-                print("ERROR: Unable to load current Postgres World table.")
-                print(e)
+            except Exception:
+                tb = traceback.format_exc()
+                log("ERROR: Unable to load current Postgres World table.")
+                log_error(tb)
         elif database_name == "mongodb":
             try:
                 worlds_json = {}
@@ -207,9 +206,10 @@ class FrameworkTestType:
                                 world["randomNumber"])
                 results_json.append(worlds_json)
                 connection.close()
-            except Exception as e:
-                print("ERROR: Unable to load current MongoDB World table.")
-                print(e)
+            except Exception:
+                tb = traceback.format_exc()
+                log("ERROR: Unable to load current MongoDB World table.")
+                log_error(tb)
         else:
             raise ValueError(
                 "Database: {!s} does not exist".format(database_name))

+ 6 - 2
toolset/benchmark/test_types/verifications.py

@@ -1,5 +1,8 @@
 import json
 import re
+import traceback
+
+from toolset.utils.output_helper import log_error
 
 
 def basic_body_verification(body, url, is_json_check=True):
@@ -275,8 +278,9 @@ def verify_updates(old_worlds, new_worlds, updates_expected, url):
                 if entry_id in old_worlds[n] and entry_id in new_worlds[n]:
                     if old_worlds[n][entry_id] != new_worlds[n][entry_id]:
                         successful_updates += 1
-            except Exception as e:
-                print(e)
+            except Exception:
+                tb = traceback.format_exc()
+                log_error(tb)
         n += 1
 
     if successful_updates == 0:

+ 2 - 2
toolset/continuous/tasks/run-benchmarks.sh

@@ -5,6 +5,6 @@
 echo Change to benchmark root
 cd $TFB_REPOPARENT/$TFB_REPONAME
 echo Cleaning
-PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python toolset/run-tests.py --clean
+FWROOT=$TFB_REPOPARENT/$TFB_REPONAME PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python toolset/run-tests.py --clean
 echo Running tests
-PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python toolset/run-tests.py
+FWROOT=$TFB_REPOPARENT/$TFB_REPONAME PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python toolset/run-tests.py

+ 1 - 1
toolset/continuous/tasks/run-tasks.sh

@@ -20,4 +20,4 @@ $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/run-benchmarks.sh
 #  Tasks after the run   #
 ##########################
 
-PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/keep-logs.py
+FWROOT=$TFB_REPOPARENT/$TFB_REPONAME PYTHONPATH=$TFB_REPOPARENT/$TFB_REPONAME python $TFB_REPOPARENT/$TFB_REPONAME/toolset/continuous/tasks/keep-logs.py

+ 12 - 18
toolset/run-tests.py

@@ -7,8 +7,6 @@ import platform
 import multiprocessing
 import signal
 from toolset.benchmark.benchmarker import Benchmarker
-from toolset.utils import setup_util
-from toolset.utils.unbuffered import Unbuffered
 from toolset.utils.scaffolding import Scaffolding
 from toolset.utils.initializer import initialize
 from toolset.utils import cleaner
@@ -16,6 +14,7 @@ from toolset.utils.results_helper import Results
 from toolset.utils.benchmark_config import BenchmarkConfig
 from toolset.utils import docker_helper
 from toolset.utils.metadata_helper import gather_tests
+from toolset.utils.output_helper import log
 from ast import literal_eval
 
 # Enable cross-platform colored output
@@ -42,8 +41,8 @@ class StoreSeqAction(argparse.Action):
             try:
                 (start, step, end) = sequence.split(':')
             except ValueError:
-                print("  Invalid: {!s}".format(sequence))
-                print("  Requires start:step:end, e.g. 1:2:10")
+                log("  Invalid: {!s}".format(sequence))
+                log("  Requires start:step:end, e.g. 1:2:10")
                 raise
             result.remove(sequence)
             result = result + range(int(start), int(end), int(step))
@@ -77,12 +76,6 @@ def main(argv=None):
     if argv is None:
         argv = sys.argv
 
-    # Enable unbuffered output so messages will appear in the proper order with subprocess output.
-    sys.stdout = Unbuffered(sys.stdout)
-
-    # Update environment for shell scripts
-    os.environ['FWROOT'] = setup_util.get_fwroot()
-    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
     # 'Ubuntu', '14.04', 'trusty' respectively
     os.environ['TFB_DISTRIB_ID'], os.environ[
         'TFB_DISTRIB_RELEASE'], os.environ[
@@ -110,7 +103,7 @@ def main(argv=None):
                     os.environ['FWROOT'],
                     args.conf_file)) and not os.path.exists(
                         os.path.join(os.environ['FWROOT'] + 'benchmark.cfg')):
-            print("No config file found. Aborting!")
+            log("No config file found. Aborting!")
             exit(1)
         with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
             config = ConfigParser.SafeConfigParser()
@@ -123,7 +116,7 @@ def main(argv=None):
                 except Exception:
                     pass
     except IOError:
-        print("Configuration file not found!")
+        log("Configuration file not found!")
         exit(1)
 
     ##########################################################
@@ -132,9 +125,9 @@ def main(argv=None):
 
     # Verify and massage options
     if defaults['client_user'] is None or defaults['client_host'] is None:
-        print("client_user and client_host are required!")
-        print("Please check your configuration file.")
-        print("Aborting!")
+        log("client_user and client_host are required!")
+        log("Please check your configuration file.")
+        log("Aborting!")
         exit(1)
 
     if defaults['database_user'] is None:
@@ -283,7 +276,7 @@ def main(argv=None):
         initialize(config)
 
     elif config.build:
-        docker_helper.build(config, config.build, None)
+        docker_helper.build(config, config.build)
 
     elif config.clean:
         cleaner.clean(results)
@@ -293,7 +286,7 @@ def main(argv=None):
         all_tests = gather_tests(benchmarker_config=config)
 
         for test in all_tests:
-            print(test.name)
+            log(test.name)
 
     elif config.parse != None:
         # TODO: broken
@@ -306,7 +299,8 @@ def main(argv=None):
 
     else:
         benchmarker = Benchmarker(config, results)
-        benchmarker.run()
+        if not benchmarker.run():
+            return 1
 
     return 0
 

+ 2 - 2
toolset/setup/linux/prerequisites.sh

@@ -57,9 +57,9 @@ sudo sh -c "echo '*            soft    rtprio             99' >> /etc/security/l
 
 # Create a tfb command alias for running the toolset
 # For now, this still ensures you have to be in the framework root to run it
-export PWD=$(pwd)
+export FWROOT=$(pwd)
 sudo tee /usr/local/bin/tfb <<EOF
 #!/bin/bash
-PYTHONPATH=$PWD python $PWD/toolset/run-tests.py "\$@"
+FWROOT=$FWROOT PYTHONPATH=$FWROOT python $PWD/toolset/run-tests.py "\$@"
 EOF
 sudo chmod a+x /usr/local/bin/tfb

+ 2 - 6
toolset/utils/benchmark_config.py

@@ -1,8 +1,7 @@
-from toolset.utils import setup_util
 from toolset.benchmark.test_types import *
 from toolset.utils.output_helper import QuietOutputStream
 
-import logging
+import os
 import time
 
 
@@ -55,9 +54,6 @@ class BenchmarkConfig:
 
         self.start_time = time.time()
 
-        # setup logging
-        logging.basicConfig(stream=self.quiet_out, level=logging.INFO)
-
         # setup some additional variables
         if self.database_user == None: self.database_user = self.client_user
         if self.database_host == None: self.database_host = self.client_host
@@ -65,7 +61,7 @@ class BenchmarkConfig:
             self.database_identity_file = self.client_identity_file
 
         # Remember root directory
-        self.fwroot = setup_util.get_fwroot()
+        self.fwroot = os.getenv('FWROOT')
 
         if hasattr(self, 'parse') and self.parse != None:
             self.timestamp = self.parse

+ 88 - 72
toolset/utils/docker_helper.py

@@ -6,11 +6,10 @@ import multiprocessing
 import json
 import docker
 import time
-
+import traceback
 from threading import Thread
 
-from toolset.utils import setup_util
-from toolset.utils.output_helper import tee_output
+from toolset.utils.output_helper import log, log_error, FNULL
 from toolset.utils.metadata_helper import gather_tests
 from toolset.utils.ordered_set import OrderedSet
 from toolset.utils.database_helper import test_database
@@ -49,7 +48,7 @@ def clean(config):
     subprocess.check_call(command)
 
 
-def build(benchmarker_config, test_names, out):
+def build(benchmarker_config, test_names, build_log_dir=os.devnull):
     '''
     Builds the dependency chain as well as the test implementation docker images
     for the given tests.
@@ -57,6 +56,8 @@ def build(benchmarker_config, test_names, out):
     tests = gather_tests(test_names)
 
     for test in tests:
+        log_prefix = "%s: " % test.name
+
         docker_buildargs = {
             'CPU_COUNT': str(multiprocessing.cpu_count()),
             'MAX_CONCURRENCY': str(max(benchmarker_config.concurrency_levels)),
@@ -78,84 +79,93 @@ def build(benchmarker_config, test_names, out):
                         __gather_dependencies(
                         __gather_dependencies(
                             os.path.join(test.directory, test_docker_file)))))
                             os.path.join(test.directory, test_docker_file)))))
 
 
-            docker_dir = os.path.join(setup_util.get_fwroot(), "toolset",
-                                      "setup", "docker")
+            docker_dir = os.path.join(
+                os.getenv('FWROOT'), "toolset", "setup", "docker")
             for dependency in deps:
             for dependency in deps:
-                docker_file = os.path.join(test.directory,
+                build_log_file = build_log_dir
+                if build_log_dir is not os.devnull:
+                    build_log_file = os.path.join(
+                        build_log_dir, "%s.log" % dependency.lower())
+                with open(build_log_file, 'w') as build_log:
+                    docker_file = os.path.join(test.directory,
+                                               dependency + ".dockerfile")
+                    if not docker_file or not os.path.exists(docker_file):
+                        docker_file = find(docker_dir,
                                            dependency + ".dockerfile")
                                            dependency + ".dockerfile")
-                if not docker_file or not os.path.exists(docker_file):
-                    docker_file = find(docker_dir, dependency + ".dockerfile")
-                if not docker_file:
-                    tee_output(
-                        out,
-                        "Docker build failed; %s could not be found; terminating\n"
-                        % (dependency + ".dockerfile"))
-                    return 1
+                    if not docker_file:
+                        log("Docker build failed; %s could not be found; terminating"
+                            % (dependency + ".dockerfile"), log_prefix,
+                            build_log)
+                        return 1
+
+                    # Build the dependency image
+                    try:
+                        for line in docker.APIClient(
+                                base_url='unix://var/run/docker.sock').build(
+                                    path=os.path.dirname(docker_file),
+                                    dockerfile="%s.dockerfile" % dependency,
+                                    tag="tfb/%s" % dependency,
+                                    buildargs=docker_buildargs,
+                                    forcerm=True):
+                            if line.startswith('{"stream":'):
+                                line = json.loads(line)
+                                line = line[line.keys()[0]].encode('utf-8')
+                                log(line, log_prefix, build_log)
+                    except Exception:
+                        tb = traceback.format_exc()
+                        log("Docker dependency build failed; terminating",
+                            log_prefix, build_log)
+                        log_error(tb, log_prefix, build_log)
+                        return 1
 
 
-                # Build the dependency image
+        # Build the test images
+        for test_docker_file in test_docker_files:
+            build_log_file = build_log_dir
+            if build_log_dir is not os.devnull:
+                build_log_file = os.path.join(
+                    build_log_dir, "%s.log" % test_docker_file.replace(
+                        ".dockerfile", "").lower())
+            with open(build_log_file, 'w') as build_log:
                 try:
                 try:
                     for line in docker.APIClient(
                     for line in docker.APIClient(
                             base_url='unix://var/run/docker.sock').build(
                             base_url='unix://var/run/docker.sock').build(
-                                path=os.path.dirname(docker_file),
-                                dockerfile="%s.dockerfile" % dependency,
-                                tag="tfb/%s" % dependency,
+                                path=test.directory,
+                                dockerfile=test_docker_file,
+                                tag="tfb/test/%s" % test_docker_file.replace(
+                                    ".dockerfile", ""),
                                 buildargs=docker_buildargs,
                                 buildargs=docker_buildargs,
                                 forcerm=True):
                                 forcerm=True):
-                        prev_line = os.linesep
                         if line.startswith('{"stream":'):
                         if line.startswith('{"stream":'):
                             line = json.loads(line)
                             line = json.loads(line)
                             line = line[line.keys()[0]].encode('utf-8')
                             line = line[line.keys()[0]].encode('utf-8')
-                            if prev_line.endswith(os.linesep):
-                                tee_output(out, line)
-                            else:
-                                tee_output(out, line)
-                            prev_line = line
-                except Exception as e:
-                    tee_output(out,
-                               "Docker dependency build failed; terminating\n")
-                    print(e)
+                            log(line, log_prefix, build_log)
+                except Exception:
+                    tb = traceback.format_exc()
+                    log("Docker build failed; terminating", log_prefix,
+                        build_log)
+                    log_error(tb, log_prefix, build_log)
                     return 1
                     return 1
 
 
-        # Build the test images
-        for test_docker_file in test_docker_files:
-            try:
-                for line in docker.APIClient(
-                        base_url='unix://var/run/docker.sock').build(
-                            path=test.directory,
-                            dockerfile=test_docker_file,
-                            tag="tfb/test/%s" % test_docker_file.replace(
-                                ".dockerfile", ""),
-                            buildargs=docker_buildargs,
-                            forcerm=True):
-                    prev_line = os.linesep
-                    if line.startswith('{"stream":'):
-                        line = json.loads(line)
-                        line = line[line.keys()[0]].encode('utf-8')
-                        if prev_line.endswith(os.linesep):
-                            tee_output(out, line)
-                        else:
-                            tee_output(out, line)
-                        prev_line = line
-            except Exception as e:
-                tee_output(out, "Docker build failed; terminating\n")
-                print(e)
-                return 1
-
     return 0
     return 0
 
 
 
 
-def run(benchmarker_config, docker_files, out):
+def run(benchmarker_config, docker_files, run_log_dir):
     '''
     '''
     Run the given Docker container(s)
     Run the given Docker container(s)
     '''
     '''
     client = docker.from_env()
     client = docker.from_env()
 
 
     for docker_file in docker_files:
     for docker_file in docker_files:
+        log_prefix = "%s: " % docker_file.replace(".dockerfile", "")
         try:
         try:
 
 
-            def watch_container(container):
-                for line in container.logs(stream=True):
-                    tee_output(out, line)
+            def watch_container(container, docker_file):
+                with open(
+                        os.path.join(
+                            run_log_dir, "%s.log" % docker_file.replace(
+                                ".dockerfile", "").lower()), 'w') as run_log:
+                    for line in container.logs(stream=True):
+                        log(line, log_prefix, run_log)
 
 
             extra_hosts = {
             extra_hosts = {
                 socket.gethostname(): str(benchmarker_config.server_host),
                 socket.gethostname(): str(benchmarker_config.server_host),
@@ -173,15 +183,23 @@ def run(benchmarker_config, docker_files, out):
                 init=True,
                 init=True,
                 extra_hosts=extra_hosts)
                 extra_hosts=extra_hosts)
 
 
-            watch_thread = Thread(target=watch_container, args=(container, ))
+            watch_thread = Thread(
+                target=watch_container, args=(
+                    container,
+                    docker_file,
+                ))
             watch_thread.daemon = True
             watch_thread.daemon = True
             watch_thread.start()
             watch_thread.start()
 
 
-        except Exception as e:
-            tee_output(out,
-                       "Running docker cointainer: %s failed" % docker_file)
-            print(e)
-            return 1
+        except Exception:
+            with open(
+                    os.path.join(run_log_dir, "%s.log" % docker_file.replace(
+                        ".dockerfile", "").lower()), 'w') as run_log:
+                tb = traceback.format_exc()
+                log("Running docker cointainer: %s failed" % docker_file,
+                    log_prefix, run_log)
+                log_error(tb, log_prefix, run_log)
+                return 1
 
 
     return 0
     return 0
 
 
@@ -205,9 +223,9 @@ def successfully_running_containers(docker_files, out):
 
 
     for image_name in expected_running_container_images:
     for image_name in expected_running_container_images:
         if image_name not in running_container_images:
         if image_name not in running_container_images:
-            tee_output(out,
-                       "ERROR: Expected tfb/test/%s to be running container" %
-                       image_name)
+            log_prefix = "%s: " % image_name
+            log("ERROR: Expected tfb/test/%s to be running container" %
+                image_name, log_prefix, out)
             return False
             return False
     return True
     return True
 
 
@@ -231,7 +249,7 @@ def stop(config=None, database_container_id=None, test=None):
     if database_container_id:
     if database_container_id:
         command = list(config.database_ssh_command)
         command = list(config.database_ssh_command)
         command.extend(['docker', 'stop', database_container_id])
         command.extend(['docker', 'stop', database_container_id])
-        subprocess.check_call(command)
+        subprocess.check_call(command, stdout=FNULL, stderr=subprocess.STDOUT)
     client.images.prune()
     client.images.prune()
     client.containers.prune()
     client.containers.prune()
     client.networks.prune()
     client.networks.prune()
@@ -318,12 +336,10 @@ def __gather_dependencies(docker_file):
     '''
     '''
     Gathers all the known docker dependencies for the given docker image.
     Gathers all the known docker dependencies for the given docker image.
     '''
     '''
-    # Avoid setting up a circular import
-    from toolset.utils import setup_util
     deps = []
     deps = []
 
 
-    docker_dir = os.path.join(setup_util.get_fwroot(), "toolset", "setup",
-                              "docker")
+    docker_dir = os.path.join(
+        os.getenv('FWROOT'), "toolset", "setup", "docker")
 
 
     if os.path.exists(docker_file):
     if os.path.exists(docker_file):
         with open(docker_file) as fp:
         with open(docker_file) as fp:
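
With this change, build() and run() take a log directory instead of a shared output stream and write one <name>.log per dependency image, test image, and running container. A rough usage sketch based on the new signatures; the caller-side names (config, results_dir) and the 'gemini' test are placeholders, not part of this diff:

    import os
    from toolset.utils import docker_helper

    def build_and_run(config, results_dir, test_name='gemini'):
        # 'results_dir' and the default test name are placeholders.
        build_log_dir = os.path.join(results_dir, 'build')
        run_log_dir = os.path.join(results_dir, 'run')
        for d in (build_log_dir, run_log_dir):
            if not os.path.exists(d):
                os.makedirs(d)
        if docker_helper.build(config, [test_name], build_log_dir) != 0:
            raise Exception("docker build failed; see %s" % build_log_dir)
        return docker_helper.run(config, ['%s.dockerfile' % test_name],
                                 run_log_dir)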

+ 8 - 9
toolset/utils/initializer.py

@@ -1,11 +1,10 @@
 import subprocess, os
 import subprocess, os
-from toolset.utils import setup_util
 
 
-DEVNULL = open(os.devnull, 'w')
+from toolset.utils.output_helper import FNULL
 
 
 
 
 def initialize(args):
 def initialize(args):
-    fwroot = setup_util.get_fwroot()
+    fwroot = os.getenv('FWROOT')
     dbuser = args.database_user
     dbuser = args.database_user
     dbhost = args.database_host
     dbhost = args.database_host
     dbiden = args.database_identity_file
     dbiden = args.database_identity_file
@@ -71,8 +70,8 @@ def __check_connection(user, host, identity_file, app_host):
         p = subprocess.Popen(
         p = subprocess.Popen(
             __ssh_string(user, host, identity_file),
             __ssh_string(user, host, identity_file),
             stdin=subprocess.PIPE,
             stdin=subprocess.PIPE,
-            stdout=DEVNULL,
-            stderr=DEVNULL)
+            stdout=FNULL,
+            stderr=subprocess.STDOUT)
         p.communicate("ssh -T -o StrictHostKeyChecking=no %s" % app_host)
         p.communicate("ssh -T -o StrictHostKeyChecking=no %s" % app_host)
         if p.returncode:
         if p.returncode:
             client_conn = False
             client_conn = False
@@ -96,8 +95,8 @@ def __init_client(fwroot, user, host, identity_file, quiet):
             p = subprocess.Popen(
             p = subprocess.Popen(
                 __ssh_string(user, host, identity_file),
                 __ssh_string(user, host, identity_file),
                 stdin=subprocess.PIPE,
                 stdin=subprocess.PIPE,
-                stdout=DEVNULL,
-                stderr=DEVNULL)
+                stdout=FNULL,
+                stderr=subprocess.STDOUT)
         else:
         else:
             p = subprocess.Popen(
             p = subprocess.Popen(
                 __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
                 __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
@@ -120,8 +119,8 @@ def __init_database(fwroot, user, host, identity_file, quiet):
             p = subprocess.Popen(
             p = subprocess.Popen(
                 __ssh_string(user, host, identity_file),
                 __ssh_string(user, host, identity_file),
                 stdin=subprocess.PIPE,
                 stdin=subprocess.PIPE,
-                stdout=DEVNULL,
-                stderr=DEVNULL)
+                stdout=FNULL,
+                stderr=subprocess.STDOUT)
         else:
         else:
             p = subprocess.Popen(
             p = subprocess.Popen(
                 __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
                 __ssh_string(user, host, identity_file), stdin=subprocess.PIPE)
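
The shared FNULL handle from output_helper replaces the module-local DEVNULL here; the silencing pattern reduces to a single null redirect, since stderr is merged into stdout. A minimal sketch (the echo command is just an example):

    import subprocess
    from toolset.utils.output_helper import FNULL

    # stderr is merged into stdout, so one null redirect hides all output.
    subprocess.check_call(['echo', 'hello'], stdout=FNULL,
                          stderr=subprocess.STDOUT)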

+ 7 - 10
toolset/utils/metadata_helper.py

@@ -2,21 +2,20 @@ import ConfigParser
 import os
 import os
 import glob
 import glob
 import json
 import json
-import logging
 
 
 from ast import literal_eval
 from ast import literal_eval
 from collections import OrderedDict
 from collections import OrderedDict
 
 
+from toolset.utils.output_helper import log
+
 
 
 def gather_langauges():
 def gather_langauges():
     '''
     '''
     Gathers all the known languages in the suite via the folder names
     Gathers all the known languages in the suite via the folder names
     beneath FWROOT.
     beneath FWROOT.
     '''
     '''
-    # Avoid setting up a circular import
-    from toolset.utils import setup_util
 
 
-    lang_dir = os.path.join(setup_util.get_fwroot(), "frameworks")
+    lang_dir = os.path.join(os.getenv('FWROOT'), "frameworks")
     langs = []
     langs = []
     for dir in glob.glob(os.path.join(lang_dir, "*")):
     for dir in glob.glob(os.path.join(lang_dir, "*")):
         langs.append(dir.replace(lang_dir, "")[1:])
         langs.append(dir.replace(lang_dir, "")[1:])
@@ -40,7 +39,6 @@ def gather_tests(include=[], exclude=[], benchmarker_config=None,
     '''
     '''
     # Avoid setting up a circular import
     # Avoid setting up a circular import
     from toolset.utils.benchmark_config import BenchmarkConfig
     from toolset.utils.benchmark_config import BenchmarkConfig
-    from toolset.utils import setup_util
 
 
     # Help callers out a bit
     # Help callers out a bit
     if include is None:
     if include is None:
@@ -57,7 +55,7 @@ def gather_tests(include=[], exclude=[], benchmarker_config=None,
 
 
     # Setup default BenchmarkerConfig using example configuration
     # Setup default BenchmarkerConfig using example configuration
     if benchmarker_config is None:
     if benchmarker_config is None:
-        default_config = setup_util.get_fwroot() + "/benchmark.cfg"
+        default_config = os.getenv('FWROOT') + "/benchmark.cfg"
         config = ConfigParser.SafeConfigParser()
         config = ConfigParser.SafeConfigParser()
         config.readfp(open(default_config))
         config.readfp(open(default_config))
         defaults = dict(config.items("Defaults"))
         defaults = dict(config.items("Defaults"))
@@ -196,9 +194,8 @@ def parse_config(config, directory, benchmarker_config, results):
 
 
         tests_to_run = [name for (name, keys) in test.iteritems()]
         tests_to_run = [name for (name, keys) in test.iteritems()]
         if "default" not in tests_to_run:
         if "default" not in tests_to_run:
-            logging.warn(
-                "Framework %s does not define a default test in benchmark_config.json",
-                config['framework'])
+            log("Framework %s does not define a default test in benchmark_config.json"
+                % config['framework'])
 
 
         # Check that each test configuration is acceptable
         # Check that each test configuration is acceptable
         # Throw exceptions if a field is missing, or how to improve the field
         # Throw exceptions if a field is missing, or how to improve the field
@@ -217,7 +214,7 @@ def parse_config(config, directory, benchmarker_config, results):
                     # This is quite common - most tests don't support all types
                     # This is quite common - most tests don't support all types
                     # Quitely log it and move on (debug logging is on in travis and this causes
                     # Quitely log it and move on (debug logging is on in travis and this causes
                     # ~1500 lines of debug, so I'm totally ignoring it for now
                     # ~1500 lines of debug, so I'm totally ignoring it for now
-                    # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
+                    # log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
                     pass
                     pass
 
 
             # We need to sort by test_type to run
             # We need to sort by test_type to run
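
gather_langauges() now derives the language list purely from the folder names under $FWROOT/frameworks. An equivalent standalone sketch, assuming FWROOT is exported; the example folder names are not part of this change:

    import glob, os

    # e.g. frameworks/Go and frameworks/Python would yield ['Go', 'Python'].
    lang_dir = os.path.join(os.getenv('FWROOT'), 'frameworks')
    langs = [os.path.basename(d)
             for d in glob.glob(os.path.join(lang_dir, '*'))]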

+ 68 - 18
toolset/utils/output_helper.py

@@ -1,45 +1,95 @@
-import os, sys
+# -*- coding: utf-8 -*-
+import os, sys, re
 from contextlib import contextmanager
 from contextlib import contextmanager
 
 
+# RegExp for stripping color codes
+seq = re.compile(r'\x1B\[\d+m')
 
 
-def header(message, top='-', bottom='-'):
+FNULL = open(os.devnull, 'w')
+
+
+def header(message,
+           top='-',
+           bottom='-',
+           log_file=None,
+           quiet=False,
+           color=None):
     '''
     '''
-    Generates a clean header
+    Generates a clean header with the given message and top/bottom text breaks.
+    If a log file is provided, the header is also written to it.
     '''
     '''
     topheader = (top * 80)[:80]
     topheader = (top * 80)[:80]
     bottomheader = (bottom * 80)[:80]
     bottomheader = (bottom * 80)[:80]
     result = ""
     result = ""
     if topheader != "":
     if topheader != "":
+        if color is not None:
+            result += color
         result += "%s" % topheader
         result += "%s" % topheader
     if message != "":
     if message != "":
+        if color is not None:
+            result += color
+
         if result == "":
         if result == "":
             result = "  %s" % message
             result = "  %s" % message
         else:
         else:
             result += "%s  %s" % (os.linesep, message)
             result += "%s  %s" % (os.linesep, message)
     if bottomheader != "":
     if bottomheader != "":
-        if result == "":
-            result = "%s" % bottomheader
-        else:
-            result += "%s%s" % (os.linesep, bottomheader)
-    return result + os.linesep
+        if color is not None:
+            result += color
+        result += "%s%s" % (os.linesep, bottomheader)
+    log(result + os.linesep, None, log_file, True, quiet)
 
 
 
 
-def tee_output(out, line):
+def log(log_text=None,
+        prefix=None,
+        log_file=None,
+        allow_newlines=False,
+        quiet=False):
     '''
     '''
-    Writes to bouth stdout and the provided out file
+    Logs the given text and optional prefix to stdout (if quiet is False) and
+    to an optional log file. By default, newlines are stripped so that each
+    call logs a single line; pass allow_newlines=True to emit multi-line
+    output unchanged.
     '''
     '''
-    sys.stdout.write(line)
-    sys.stdout.flush()
+    if not log_text:
+        return
+
+    if not allow_newlines:
+        log_text = log_text.splitlines()[0] + os.linesep
+
+        if log_text.splitlines()[0].strip() == '':
+            return
+
+    try:
+        if not quiet:
+            if prefix is not None:
+                sys.stdout.write(prefix + log_text)
+            else:
+                sys.stdout.write(log_text)
+            sys.stdout.flush()
 
 
-    if out is not None:
-        out.write(line)
-        out.flush()
+        if log_file is not None:
+            log_file.write(seq.sub('', log_text))
+            log_file.flush()
+    except:
+        pass
+
+
+def log_error(exception=None, prefix=None, log_file=None, quiet=False):
+    '''
+    Logs the given exception
+    '''
+    log(exception + os.linesep, prefix, log_file, True, quiet)
 
 
 
 
 class QuietOutputStream:
 class QuietOutputStream:
+    '''
+    Provides an output stream which writes either to stdout or to nothing,
+    depending on the is_quiet param.
+    '''
+
     def __init__(self, is_quiet):
     def __init__(self, is_quiet):
         self.is_quiet = is_quiet
         self.is_quiet = is_quiet
-        self.null_out = open(os.devnull, 'w')
 
 
     def fileno(self):
     def fileno(self):
         with self.enable():
         with self.enable():
@@ -55,8 +105,8 @@ class QuietOutputStream:
             old_out = sys.stdout
             old_out = sys.stdout
             old_err = sys.stderr
             old_err = sys.stderr
             try:
             try:
-                sys.stdout = self.null_out
-                sys.stderr = self.null_out
+                sys.stdout = FNULL
+                sys.stderr = FNULL
                 yield
                 yield
             finally:
             finally:
                 sys.stdout = old_out
                 sys.stdout = old_out
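
Taken together, header(), log() and log_error() replace the earlier mix of print, logging and tee_output. A short usage sketch based on the signatures above; the file name, prefix and messages are placeholders:

    import traceback
    from colorama import Fore
    from toolset.utils.output_helper import header, log, log_error

    with open('example.log', 'w') as log_file:
        header("Verification Summary", top='=', bottom='-',
               log_file=log_file, color=Fore.CYAN)
        log("wrk finished", prefix="gemini: ", log_file=log_file)
        try:
            raise RuntimeError("container exited early")
        except RuntimeError:
            log_error(traceback.format_exc(), "gemini: ", log_file)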

+ 24 - 36
toolset/utils/results_helper.py

@@ -1,5 +1,5 @@
 from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
 from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
-from toolset.utils.output_helper import header
+from toolset.utils.output_helper import header, log, FNULL
 
 
 import os
 import os
 import logging
 import logging
@@ -37,13 +37,13 @@ class Results:
         self.environmentDescription = self.config.results_environment
         self.environmentDescription = self.config.results_environment
         try:
         try:
             self.git = dict()
             self.git = dict()
+            subprocess.check_call(
+                ['git', 'status'], stdout=FNULL, stderr=subprocess.STDOUT)
             self.git['commitId'] = self.__get_git_commit_id()
             self.git['commitId'] = self.__get_git_commit_id()
             self.git['repositoryUrl'] = self.__get_git_repository_url()
             self.git['repositoryUrl'] = self.__get_git_repository_url()
             self.git['branchName'] = self.__get_git_branch_name()
             self.git['branchName'] = self.__get_git_branch_name()
-        except Exception as e:
-            logging.debug(
-                'Could not read local git repository, which is fine. The error was: %s',
-                e)
+        except Exception:
+            # Could not read local git repository, which is fine.
             self.git = None
             self.git = None
         self.startTime = int(round(time.time() * 1000))
         self.startTime = int(round(time.time() * 1000))
         self.completionTime = None
         self.completionTime = None
@@ -82,13 +82,6 @@ class Results:
         self.failed['cached_query'] = []
         self.failed['cached_query'] = []
         self.verify = dict()
         self.verify = dict()
 
 
-        try:
-            with open(os.path.join(self.directory, 'results.json'), 'r') as f:
-                # Load json file into results object
-                self.__dict__.update(json.load(f))
-        except IOError:
-            logging.warn("results.json for test not found.")
-
     #############################################################################
     #############################################################################
     # PUBLIC FUNCTIONS
     # PUBLIC FUNCTIONS
     #############################################################################
     #############################################################################
@@ -223,7 +216,7 @@ class Results:
                     headers={'Content-Type': 'application/json'},
                     headers={'Content-Type': 'application/json'},
                     data=json.dumps(self.__to_jsonable(), indent=2))
                     data=json.dumps(self.__to_jsonable(), indent=2))
             except (Exception):
             except (Exception):
-                logging.error("Error uploading results.json")
+                log("Error uploading results.json")
 
 
     def load(self):
     def load(self):
         '''
         '''
@@ -301,12 +294,10 @@ class Results:
             # Normally you don't have to use Fore.BLUE before each line, but
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
             # or stream flush, so we have to ensure that the color code is printed repeatedly
-            prefix = Fore.CYAN
-            for line in header(
-                    "Verification Summary", top='=', bottom='').split('\n'):
-                print(prefix + line)
+            header(
+                "Verification Summary", top='=', bottom='-', color=Fore.CYAN)
             for test in tests:
             for test in tests:
-                print(prefix + "| Test: {!s}".format(test.name))
+                log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                 if test.name in self.verify.keys():
                     for test_type, result in self.verify[
                     for test_type, result in self.verify[
                             test.name].iteritems():
                             test.name].iteritems():
@@ -316,16 +307,16 @@ class Results:
                             color = Fore.YELLOW
                             color = Fore.YELLOW
                         else:
                         else:
                             color = Fore.RED
                             color = Fore.RED
-                        print(prefix + "|       " + test_type.ljust(13) +
-                              ' : ' + color + result.upper())
+                        log(Fore.CYAN + "|       " + test_type.ljust(13) +
+                            ' : ' + color + result.upper())
                 else:
                 else:
-                    print(prefix + "|      " + Fore.RED +
-                          "NO RESULTS (Did framework launch?)")
-            print(prefix + header('', top='', bottom='=') + Style.RESET_ALL)
+                    log(Fore.CYAN + "|      " + Fore.RED +
+                        "NO RESULTS (Did framework launch?)")
+            header('', top='=', bottom='', color=Fore.CYAN)
 
 
-        print("Time to complete: " +
-              str(int(time.time() - self.config.start_time)) + " seconds")
-        print("Results are saved in " + self.directory)
+        log("%sTime to complete: %s seconds" %
+            (Style.RESET_ALL, str(int(time.time() - self.config.start_time))))
+        log("Results are saved in " + self.directory)
 
 
     #############################################################################
     #############################################################################
     # PRIVATE FUNCTIONS
     # PRIVATE FUNCTIONS
@@ -362,7 +353,7 @@ class Results:
             with open(self.file, 'w') as f:
             with open(self.file, 'w') as f:
                 f.write(json.dumps(self.__to_jsonable(), indent=2))
                 f.write(json.dumps(self.__to_jsonable(), indent=2))
         except (IOError):
         except (IOError):
-            logging.error("Error writing results.json")
+            log("Error writing results.json")
 
 
     def __count_sloc(self):
     def __count_sloc(self):
         '''
         '''
@@ -375,9 +366,8 @@ class Results:
         for framework, testlist in frameworks.items():
         for framework, testlist in frameworks.items():
             if not os.path.exists(
             if not os.path.exists(
                     os.path.join(testlist[0].directory, "source_code")):
                     os.path.join(testlist[0].directory, "source_code")):
-                logging.warn(
-                    "Cannot count lines of code for %s - no 'source_code' file",
-                    framework)
+                log("Cannot count lines of code for %s - no 'source_code' file"
+                    % framework)
                 continue
                 continue
 
 
             # Unfortunately the source_code files use lines like
             # Unfortunately the source_code files use lines like
@@ -394,23 +384,21 @@ class Results:
                         os.path.join(testlist[0].directory, "cloc_defs.txt")):
                         os.path.join(testlist[0].directory, "cloc_defs.txt")):
                     command += " --read-lang-def %s" % os.path.join(
                     command += " --read-lang-def %s" % os.path.join(
                         testlist[0].directory, "cloc_defs.txt")
                         testlist[0].directory, "cloc_defs.txt")
-                    logging.info("Using custom cloc definitions for %s",
-                                 framework)
+                    log("Using custom cloc definitions for %s" % framework)
 
 
                 # Find the last instance of the word 'code' in the yaml output. This should
                 # Find the last instance of the word 'code' in the yaml output. This should
                 # be the line count for the sum of all listed files or just the line count
                 # be the line count for the sum of all listed files or just the line count
                 # for the last file in the case where there's only one file listed.
                 # for the last file in the case where there's only one file listed.
                 command = command + "| grep code | tail -1 | cut -d: -f 2"
                 command = command + "| grep code | tail -1 | cut -d: -f 2"
-                logging.debug("Running \"%s\" (cwd=%s)", command, wd)
+                log("Running \"%s\" (cwd=%s)" % (command, wd))
                 lineCount = subprocess.check_output(
                 lineCount = subprocess.check_output(
                     command, cwd=wd, shell=True)
                     command, cwd=wd, shell=True)
                 jsonResult[framework] = int(lineCount)
                 jsonResult[framework] = int(lineCount)
             except subprocess.CalledProcessError:
             except subprocess.CalledProcessError:
                 continue
                 continue
             except ValueError as ve:
             except ValueError as ve:
-                logging.warn(
-                    "Unable to get linecount for %s due to error '%s'",
-                    framework, ve)
+                log("Unable to get linecount for %s due to error '%s'" %
+                    (framework, ve))
         self.rawData['slocCounts'] = jsonResult
         self.rawData['slocCounts'] = jsonResult
 
 
     def __count_commits(self):
     def __count_commits(self):
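
Git metadata is now collected only when 'git status' exits cleanly, instead of logging a debug message on failure. A standalone sketch of that detection pattern; the rev-parse call is illustrative and the actual private helpers may differ:

    import subprocess
    from toolset.utils.output_helper import FNULL

    def current_commit_id():
        try:
            # If 'git status' fails we are not inside a repository, so skip
            # git metadata entirely rather than reporting an error.
            subprocess.check_call(['git', 'status'],
                                  stdout=FNULL, stderr=subprocess.STDOUT)
            return subprocess.check_output(
                ['git', 'rev-parse', 'HEAD']).strip()
        except (subprocess.CalledProcessError, OSError):
            return None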

+ 20 - 12
toolset/utils/scaffolding.py

@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 # -*- coding: utf-8 -*-
-import os
+import os, re
 from shutil import copytree
 from shutil import copytree
-from toolset.utils.setup_util import replace_text
 from toolset.utils.metadata_helper import gather_frameworks, gather_langauges
 from toolset.utils.metadata_helper import gather_frameworks, gather_langauges
 
 
 
 
@@ -308,28 +307,29 @@ class Scaffolding:
 
 
     def __edit_scaffold_files(self):
     def __edit_scaffold_files(self):
         for file in os.listdir(os.path.join(self.test_dir)):
         for file in os.listdir(os.path.join(self.test_dir)):
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$NAME", self.name)
                 os.path.join(self.test_dir, file), "\$NAME", self.name)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$DISPLAY_NAME",
                 os.path.join(self.test_dir, file), "\$DISPLAY_NAME",
                 self.display_name)
                 self.display_name)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$APPROACH", self.approach)
                 os.path.join(self.test_dir, file), "\$APPROACH", self.approach)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$CLASSIFICATION",
                 os.path.join(self.test_dir, file), "\$CLASSIFICATION",
                 self.classification)
                 self.classification)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$FRAMEWORK",
                 os.path.join(self.test_dir, file), "\$FRAMEWORK",
                 self.framework)
                 self.framework)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
                 os.path.join(self.test_dir, file), "\$LANGUAGE", self.language)
-            replace_text(os.path.join(self.test_dir, file), "\$ORM", self.orm)
-            replace_text(
+            self.__replace_text(
+                os.path.join(self.test_dir, file), "\$ORM", self.orm)
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$PLATFORM", self.platform)
                 os.path.join(self.test_dir, file), "\$PLATFORM", self.platform)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$WEBSERVER",
                 os.path.join(self.test_dir, file), "\$WEBSERVER",
                 self.webserver)
                 self.webserver)
-            replace_text(
+            self.__replace_text(
                 os.path.join(self.test_dir, file), "\$VERSUS", self.versus)
                 os.path.join(self.test_dir, file), "\$VERSUS", self.versus)
 
 
     def __print_success(self):
     def __print_success(self):
@@ -354,3 +354,11 @@ class Scaffolding:
   provided therein.
   provided therein.
 -------------------------------------------------------------------------------"""
 -------------------------------------------------------------------------------"""
               % (self.language, self.name))
               % (self.language, self.name))
+
+    # Replaces all text found using the regular expression to_replace with the supplied replacement.
+    def __replace_text(self, file, to_replace, replacement):
+        with open(file, "r") as conf:
+            contents = conf.read()
+        replaced_text = re.sub(to_replace, replacement, contents)
+        with open(file, "w") as f:
+            f.write(replaced_text)

+ 0 - 91
toolset/utils/setup_util.py

@@ -1,91 +0,0 @@
-import re
-import os
-import sys
-import subprocess
-
-from threading import Thread
-from Queue import Queue, Empty
-
-
-class NonBlockingStreamReader:
-    '''
-  Enables calling readline in a non-blocking manner with a blocking stream, 
-  such as the ones returned from subprocess.Popen
-
-  Originally written by Eyal Arubas, who granted permission to use this inside TFB
-  See http://eyalarubas.com/python-subproc-nonblock.html
-  '''
-
-    def __init__(self, stream, eof_message=None):
-        '''
-    stream: the stream to read from.
-            Usually a process' stdout or stderr.
-    eof_message: A message to print to stdout as soon
-      as the stream's end is reached. Useful if you
-      want to track the exact moment a stream terminates
-    '''
-
-        self._s = stream
-        self._q = Queue()
-        self._eof_message = eof_message
-        self._poisonpill = 'MAGIC_POISONPILL_STRING'
-
-        def _populateQueue(stream, queue):
-            while True:
-                line = stream.readline()
-                if line:  # 'data\n' or '\n'
-                    queue.put(line)
-                else:  # '' e.g. EOF
-                    if self._eof_message:
-                        sys.stdout.write(self._eof_message + '\n')
-                    queue.put(self._poisonpill)
-                    return
-
-        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
-        self._t.daemon = True
-        self._t.start()
-
-    def readline(self, timeout=None):
-        try:
-            line = self._q.get(block=timeout is not None, timeout=timeout)
-            if line == self._poisonpill:
-                raise EndOfStream
-            return line
-        except Empty:
-            return None
-
-
-class EndOfStream(Exception):
-    pass
-
-
-# Replaces all text found using the regular expression to_replace with the supplied replacement.
-def replace_text(file, to_replace, replacement):
-    with open(file, "r") as conf:
-        contents = conf.read()
-    replaced_text = re.sub(to_replace, replacement, contents)
-    with open(file, "w") as f:
-        f.write(replaced_text)
-
-
-# Queries the shell for the value of FWROOT
-def get_fwroot():
-
-    if os.getenv('FWROOT'):
-        return os.environ['FWROOT']
-    else:
-        return os.getcwd()
-
-
-# Turns absolute path into path relative to FWROOT
-# Assumes path is underneath FWROOT, not above
-#
-# Useful for clean presentation of paths
-# e.g. /foo/bar/benchmarks/go/install.sh
-# v.s. FWROOT/go/install.sh
-def path_relative_to_root(path):
-    # Requires bash shell parameter expansion
-    return subprocess.check_output(
-        "D=%s && printf \"${D#%s}\"" % (path, get_fwroot()),
-        shell=True,
-        executable='/bin/bash')