
Revert " Squash duplicate lines in logs when writing to file" (#4584)

Nate 6 years ago
parent
commit
74a2e06e25

+ 11 - 0
frameworks/JavaScript/nodejs/mitol.dockerfile

@@ -0,0 +1,11 @@
+FROM node:10.12.0
+
+COPY ./ ./
+
+RUN apt install -yqq wget
+RUN npm install
+RUN mkdir -p node_modules/mns & mkdir -p tmp
+RUN wget -q https://github.com/Helidium/Mitol/archive/v0.0.1.tar.gz -P tmp
+RUN tar -xzvf tmp/v0.0.1.tar.gz -C tmp
+RUN make -C tmp/Mitol-0.0.1/node
+RUN cp tmp/Mitol-0.0.1/node/dist/* node_modules/mns

+ 14 - 15
toolset/benchmark/benchmarker.py

@@ -1,4 +1,4 @@
-from toolset.utils.output_helper import FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils.docker_helper import DockerHelper
 from toolset.utils.time_logger import TimeLogger
 from toolset.utils.metadata import Metadata
@@ -23,8 +23,7 @@ class Benchmarker:
         Initialize the benchmarker.
         '''
         self.config = config
-        self.log = config.log
-        self.time_logger = TimeLogger(config)
+        self.time_logger = TimeLogger()
         self.metadata = Metadata(self)
         self.audit = Audit(self)
 
@@ -50,7 +49,7 @@ class Benchmarker:
 
         any_failed = False
         # Run tests
-        self.log("Running Tests...", border='=')
+        log("Running Tests...", border='=')
 
         # build wrk and all databases needed for current run
         self.docker_helper.build_wrk()
@@ -59,7 +58,7 @@ class Benchmarker:
         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
             for test in self.tests:
-                self.log("Running Test: %s" % test.name, border='-')
+                log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
                     if not self.__run_test(test, benchmark_log):
                         any_failed = True
@@ -68,7 +67,7 @@ class Benchmarker:
 
         # Parse results
         if self.config.mode == "benchmark":
-            self.log("Parsing Results ...", border='=')
+            log("Parsing Results ...", border='=')
             self.results.parse(self.tests)
 
         self.results.set_completion_time()
@@ -78,7 +77,7 @@ class Benchmarker:
         return any_failed
 
     def stop(self, signal=None, frame=None):
-        self.log("Shutting down (may take a moment)")
+        log("Shutting down (may take a moment)")
         self.docker_helper.stop()
         sys.exit(0)
 
@@ -88,7 +87,7 @@ class Benchmarker:
 
     def __exit_test(self, success, prefix, file, message=None):
         if message:
-            self.log(message,
+            log(message,
                 prefix=prefix,
                 file=file,
                 color=Fore.RED if success else '')
@@ -173,7 +172,7 @@ class Benchmarker:
 
             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
-                self.log("Entering debug mode. Server has started. CTRL-c to stop.",
+                log("Entering debug mode. Server has started. CTRL-c to stop.",
                     prefix=log_prefix,
                     file=benchmark_log,
                     color=Fore.YELLOW)
@@ -181,14 +180,14 @@ class Benchmarker:
                     time.sleep(1)
 
             # Verify URLs and audit
-            self.log("Verifying framework URLs", prefix=log_prefix)
+            log("Verifying framework URLs", prefix=log_prefix)
             self.time_logger.mark_verify_start()
             passed_verify = test.verify_urls()
             self.audit.audit_test_dir(test.directory)
 
             # Benchmark this test
             if self.config.mode == "benchmark":
-                self.log("Benchmarking %s" % test.name,
+                log("Benchmarking %s" % test.name,
                     file=benchmark_log,
                     border='-')
                 self.time_logger.mark_benchmarking_start()
@@ -225,7 +224,7 @@ class Benchmarker:
             tb = traceback.format_exc()
             self.results.write_intermediate(test.name,
                                             "error during test: " + str(e))
-            self.log(tb, prefix=log_prefix, file=benchmark_log)
+            log(tb, prefix=log_prefix, file=benchmark_log)
             return self.__exit_test(
                 success=False,
                 message="Error during test: %s" % test.name,
@@ -241,7 +240,7 @@ class Benchmarker:
         '''
 
         def benchmark_type(test_type):
-            self.log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
+            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
 
             test = framework_test.runTests[test_type]
             raw_file = self.results.get_raw_file(framework_test.name,
@@ -269,13 +268,13 @@ class Benchmarker:
                 self.__end_logging()
 
             results = self.results.parse_test(framework_test, test_type)
-            self.log("Benchmark results:", file=benchmark_log)
+            log("Benchmark results:", file=benchmark_log)
             # TODO move into log somehow
             pprint(results)
 
             self.results.report_benchmark_results(framework_test, test_type,
                                                   results['results'])
-            self.log("Complete", file=benchmark_log)
+            log("Complete", file=benchmark_log)
 
         for test_type in framework_test.runTests:
             benchmark_type(test_type)

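For orientation, a minimal sketch (not part of the commit) of the call pattern this revert restores in benchmarker.py: callers import the module-level helper instead of holding a per-instance self.log bound to the config. The test name, prefix string, and file handle below are illustrative placeholders; the keyword arguments are the ones exercised in the hunks above.

# Sketch of the restored logging call pattern (placeholders, not real test data).
from colorama import Fore
from toolset.utils.output_helper import log

log("Running Tests...", border='=')          # 80-character '=' banner
with open('benchmark.log', 'w') as benchmark_log:
    log("Running Test: some-framework", border='-')
    log("Verifying framework URLs",
        prefix="run: ",                      # dimmed prefix on each line
        file=benchmark_log,                  # also written to the open log file
        color=Fore.YELLOW)
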
+ 4 - 3
toolset/benchmark/fortune_html_parser.py

@@ -4,11 +4,12 @@ import os
 from HTMLParser import HTMLParser
 from difflib import unified_diff
 
+from toolset.utils.output_helper import log
+
 
 class FortuneHTMLParser(HTMLParser):
-    def __init__(self, config):
+    def __init__(self):
         HTMLParser.__init__(self)
-        self.log = config.log
         self.body = []
 
     valid_fortune = '''<!doctype html><html>
@@ -184,5 +185,5 @@ class FortuneHTMLParser(HTMLParser):
                 headers_left -= 1
                 if headers_left <= 0:
                     output += os.linesep
-            self.log(output, prefix="%s: " % name)
+            log(output, prefix="%s: " % name)
         return (same, diff_lines)

+ 9 - 8
toolset/benchmark/framework_test.py

@@ -2,6 +2,8 @@ import os
 import traceback
 from requests import ConnectionError, Timeout
 
+from toolset.utils.output_helper import log
+
 # Cross-platform colored text
 from colorama import Fore, Style
 
@@ -30,7 +32,6 @@ class FrameworkTest:
         self.notes = ""
         self.port = ""
         self.versus = ""
-        self.log = benchmarker.log
 
         self.__dict__.update(args)
 
@@ -96,7 +97,7 @@ class FrameworkTest:
             with open(os.path.join(verificationPath, 'verification.txt'),
                       'w') as verification:
                 test = self.runTests[test_type]
-                self.log("VERIFYING %s" % test_type.upper(),
+                log("VERIFYING %s" % test_type.upper(),
                     file=verification,
                     border='-',
                     color=Fore.WHITE + Style.BRIGHT)
@@ -120,13 +121,13 @@ class FrameworkTest:
                 except ConnectionError as e:
                     results = [('fail', "Server did not respond to request",
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                 except Timeout as e:
                     results = [('fail', "Connection to server timed out",
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                 except Exception as e:
@@ -135,7 +136,7 @@ class FrameworkTest:
             but also that you have found a bug. Please submit an issue
             including this message: %s\n%s""" % (e, traceback.format_exc()),
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                     traceback.format_exc()
@@ -155,14 +156,14 @@ class FrameworkTest:
                     elif result.upper() == "FAIL":
                         color = Fore.RED
 
-                    self.log("   {!s}{!s}{!s} for {!s}".format(
+                    log("   {!s}{!s}{!s} for {!s}".format(
                         color, result.upper(), Style.RESET_ALL, url),
                         file=verification)
                     if reason is not None and len(reason) != 0:
                         for line in reason.splitlines():
-                            self.log("     " + line, file=verification)
+                            log("     " + line, file=verification)
                         if not test.passed:
-                            self.log("     See {!s}".format(specific_rules_url),
+                            log("     See {!s}".format(specific_rules_url),
                                 file=verification)
 
                 [output_result(r1, r2, url) for (r1, r2, url) in results]

+ 1 - 2
toolset/benchmark/test_types/fortune_type.py

@@ -5,7 +5,6 @@ from toolset.benchmark.test_types.verifications import basic_body_verification,
 
 class FortuneTestType(FrameworkTestType):
     def __init__(self, config):
-        self.config = config
         self.fortune_url = ""
         kwargs = {
             'name': 'fortune',
@@ -33,7 +32,7 @@ class FortuneTestType(FrameworkTestType):
         if len(problems) > 0:
             return problems
 
-        parser = FortuneHTMLParser(self.config)
+        parser = FortuneHTMLParser()
         parser.feed(body)
         (valid, diff) = parser.isValidFortune(self.name, body)
 

+ 11 - 10
toolset/benchmark/test_types/framework_test_type.py

@@ -1,4 +1,5 @@
 import copy
+import sys
 import json
 import requests
 import MySQLdb
@@ -7,6 +8,7 @@ import pymongo
 import traceback
 
 from colorama import Fore
+from toolset.utils.output_helper import log
 
 
 class FrameworkTestType:
@@ -27,7 +29,6 @@ class FrameworkTestType:
                  accept_header=None,
                  args=[]):
         self.config = config
-        self.log = config.log
         self.name = name
         self.requires_db = requires_db
         self.args = args
@@ -73,7 +74,7 @@ class FrameworkTestType:
         Downloads a URL and returns the HTTP response headers
         and body content as a tuple
         '''
-        self.log("Accessing URL {!s}: ".format(url), color=Fore.CYAN)
+        log("Accessing URL {!s}: ".format(url), color=Fore.CYAN)
 
         headers = {'Accept': self.accept_header}
         r = requests.get(url, timeout=15, headers=headers)
@@ -83,8 +84,8 @@ class FrameworkTestType:
         return self.headers, self.body
 
     def output_headers_and_body(self):
-        self.log(str(self.headers))
-        self.log(self.body)
+        log(str(self.headers))
+        log(self.body)
 
     def verify(self, base_url):
         '''
@@ -160,9 +161,9 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current MySQL World table.",
+                log("ERROR: Unable to load current MySQL World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         elif database_name == "postgres":
             try:
                 db = psycopg2.connect(
@@ -182,9 +183,9 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current Postgres World table.",
+                log("ERROR: Unable to load current Postgres World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         elif database_name == "mongodb":
             try:
                 worlds_json = {}
@@ -204,9 +205,9 @@ class FrameworkTestType:
                 connection.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current MongoDB World table.",
+                log("ERROR: Unable to load current MongoDB World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         else:
             raise ValueError(
                 "Database: {!s} does not exist".format(database_name))

+ 3 - 2
toolset/benchmark/test_types/verifications.py

@@ -3,7 +3,7 @@ import re
 import traceback
 
 from datetime import datetime
-from toolset.utils.output_helper import Logger
+from toolset.utils.output_helper import log
 from time import sleep
 
 def basic_body_verification(body, url, is_json_check=True):
@@ -71,6 +71,7 @@ def verify_headers(request_headers_and_body, headers, url, should_be='json'):
     # Make sure that the date object isn't cached
     sleep(3)
     second_headers, body2 = request_headers_and_body(url)
+    second_date = second_headers.get('Date')
 
     date2 = second_headers.get('Date')
     if date == date2:
@@ -284,7 +285,7 @@ def verify_updates(old_worlds, new_worlds, updates_expected, url):
                         successful_updates += 1
             except Exception:
                 tb = traceback.format_exc()
-                Logger.log(tb, squash=False)
+                log(tb)
         n += 1
 
     if successful_updates == 0:

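The hunk above touches verify_headers, which guards against a cached Date header by requesting the URL twice with a 3-second pause and comparing the two Date values. A standalone sketch of that check, assuming request_headers_and_body(url) returns a (headers, body) pair as it does in framework_test_type.py:

# Sketch of the Date-header staleness check in verify_headers.
# Assumption: request_headers_and_body(url) returns (headers, body).
from time import sleep

def date_header_is_fresh(request_headers_and_body, url):
    first_headers, _ = request_headers_and_body(url)
    sleep(3)  # a cached response would repeat the same Date value
    second_headers, _ = request_headers_and_body(url)
    # Identical Date headers across the two requests suggest caching.
    return first_headers.get('Date') != second_headers.get('Date')
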
+ 4 - 3
toolset/run-tests.py

@@ -8,6 +8,7 @@ from toolset.utils.scaffolding import Scaffolding
 from toolset.utils.audit import Audit
 from toolset.utils import cleaner
 from toolset.utils.benchmark_config import BenchmarkConfig
+from toolset.utils.output_helper import log
 
 # Enable cross-platform colored output
 from colorama import init, Fore
@@ -211,7 +212,7 @@ def main(argv=None):
             all_tests = benchmarker.metadata.gather_tests()
 
             for test in all_tests:
-                config.log(test.name)
+                log(test.name)
 
         elif config.parse:
             all_tests = benchmarker.metadata.gather_tests()
@@ -227,8 +228,8 @@ def main(argv=None):
                 return any_failed
     except Exception:
         tb = traceback.format_exc()
-        config.log("A fatal error has occurred", color=Fore.RED)
-        config.log(tb)
+        log("A fatal error has occurred", color=Fore.RED)
+        log(tb)
         # try one last time to stop docker containers on fatal error
         try:
             benchmarker.stop()

+ 5 - 5
toolset/utils/audit.py

@@ -1,3 +1,4 @@
+from toolset.utils.output_helper import log
 from colorama import Fore
 
 
@@ -8,7 +9,6 @@ class Audit:
 
     def __init__(self, benchmarker):
         self.benchmarker = benchmarker
-        self.log = benchmarker.config.log
 
     def start_audit(self):
         for lang in self.benchmarker.metadata.gather_languages():
@@ -18,13 +18,13 @@ class Audit:
 
     def audit_test_dir(self, test_dir):
         warnings = 0
-        self.log('Auditing %s:' % test_dir, color=Fore.BLUE)
+        log('Auditing %s:' % test_dir, color=Fore.BLUE)
 
         if not self.benchmarker.metadata.has_file(test_dir, 'README.md'):
-            self.log('README.md file is missing')
+            log('README.md file is missing')
             warnings += 1
 
         if warnings:
-            self.log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
+            log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
         else:
-            self.log('No problems to report', color=Fore.GREEN)
+            log('No problems to report', color=Fore.GREEN)

+ 1 - 3
toolset/utils/benchmark_config.py

@@ -1,5 +1,5 @@
 from toolset.benchmark.test_types import *
-from toolset.utils.output_helper import Logger, QuietOutputStream
+from toolset.utils.output_helper import QuietOutputStream
 
 import os
 import time
@@ -11,8 +11,6 @@ class BenchmarkConfig:
         Configures this BenchmarkConfig given the arguments provided.
         '''
 
-        self.log = Logger().log
-
         # Map type strings to their objects
         types = dict()
         types['json'] = JsonTestType(self)

+ 11 - 17
toolset/utils/docker_helper.py

@@ -8,6 +8,7 @@ import traceback
 from threading import Thread
 from colorama import Fore, Style
 
+from toolset.utils.output_helper import log
 from toolset.utils.database_helper import test_database
 
 from psutil import virtual_memory
@@ -18,7 +19,6 @@ mem_limit = int(round(virtual_memory().total * .95))
 class DockerHelper:
     def __init__(self, benchmarker=None):
         self.benchmarker = benchmarker
-        self.log = benchmarker.log
 
         self.client = docker.DockerClient(
             base_url=self.benchmarker.config.client_docker_host)
@@ -59,7 +59,7 @@ class DockerHelper:
                         index = buffer.index("\n")
                         line = buffer[:index]
                         buffer = buffer[index + 1:]
-                        self.log(line,
+                        log(line,
                             prefix=log_prefix,
                             file=build_log,
                             color=Fore.WHITE + Style.BRIGHT \
@@ -67,28 +67,25 @@ class DockerHelper:
                     # Kill docker builds if they exceed 60 mins. This will only
                     # catch builds that are still printing output.
                     if self.benchmarker.time_logger.time_since_start() > 3600:
-                        self.log("Build time exceeded 60 minutes",
+                        log("Build time exceeded 60 minutes",
                             prefix=log_prefix,
                             file=build_log,
                             color=Fore.RED)
                         raise Exception
 
                 if buffer:
-                    self.log(buffer,
+                    log(buffer,
                         prefix=log_prefix,
                         file=build_log,
                         color=Fore.WHITE + Style.BRIGHT \
                             if re.match(r'^Step \d+\/\d+', buffer) else '')
             except Exception:
                 tb = traceback.format_exc()
-                self.log("Docker build failed; terminating",
+                log("Docker build failed; terminating",
                     prefix=log_prefix,
                     file=build_log,
                     color=Fore.RED)
-                self.log(tb,
-                    squash=False,
-                    prefix=log_prefix,
-                    file=build_log)
+                log(tb, prefix=log_prefix, file=build_log)
                 self.benchmarker.time_logger.log_build_end(
                     log_prefix=log_prefix, file=build_log)
                 raise
@@ -173,7 +170,7 @@ class DockerHelper:
                             run_log_dir, "%s.log" % docker_file.replace(
                                 ".dockerfile", "").lower()), 'w') as run_log:
                     for line in docker_container.logs(stream=True):
-                        self.log(line, prefix=log_prefix, file=run_log)
+                        log(line, prefix=log_prefix, file=run_log)
 
             extra_hosts = None
             name = "tfb-server"
@@ -236,14 +233,11 @@ class DockerHelper:
                     os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                     'w') as run_log:
                 tb = traceback.format_exc()
-                self.log("Running docker container: %s.dockerfile failed" %
+                log("Running docker container: %s.dockerfile failed" %
                     test.name,
                     prefix=log_prefix,
                     file=run_log)
-                self.log(tb,
-                    squash=False,
-                    prefix=log_prefix,
-                    file=run_log)
+                log(tb, prefix=log_prefix, file=run_log)
 
         return container
 
@@ -348,7 +342,7 @@ class DockerHelper:
             database_ready = test_database(self.benchmarker.config, database)
 
         if not database_ready:
-            self.log("Database was not ready after startup", prefix=log_prefix)
+            log("Database was not ready after startup", prefix=log_prefix)
 
         return container
 
@@ -400,7 +394,7 @@ class DockerHelper:
         def watch_container(container):
             with open(raw_file, 'w') as benchmark_file:
                 for line in container.logs(stream=True):
-                    self.log(line, file=benchmark_file)
+                    log(line, file=benchmark_file)
 
         sysctl = {'net.core.somaxconn': 65535}
 

+ 4 - 5
toolset/utils/metadata.py

@@ -4,6 +4,7 @@ import json
 
 from collections import OrderedDict
 
+from toolset.utils.output_helper import log
 from colorama import Fore
 
 
@@ -19,7 +20,6 @@ class Metadata:
 
     def __init__(self, benchmarker=None):
         self.benchmarker = benchmarker
-        self.log = benchmarker.log
 
     def gather_languages(self):
         '''
@@ -99,8 +99,7 @@ class Metadata:
                 try:
                     config = json.load(config_file)
                 except ValueError:
-                    self.log("Error loading config: {!s}".format(config_file_name),
-                        squash=False,
+                    log("Error loading config: {!s}".format(config_file_name),
                         color=Fore.RED)
                     raise Exception("Error loading config file")
 
@@ -183,7 +182,7 @@ class Metadata:
             tests_to_run = [name for (name, keys) in test.iteritems()]
 
             if "default" not in tests_to_run:
-                self.log("Framework %s does not define a default test in benchmark_config.json"
+                log("Framework %s does not define a default test in benchmark_config.json"
                     % config['framework'],
                     color=Fore.YELLOW)
 
@@ -206,7 +205,7 @@ class Metadata:
                         # This is quite common - most tests don't support all types
                         # Quitely log it and move on (debug logging is on in travis and this causes
                         # ~1500 lines of debug, so I'm totally ignoring it for now
-                        # self.log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
+                        # log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
                         pass
 
                 # We need to sort by test_type to run

+ 38 - 83
toolset/utils/output_helper.py

@@ -15,100 +15,55 @@ FNULL = open(os.devnull, 'w')
 # message endlessly anyway.
 TOO_MANY_BYTES = 50 * 1024 * 1024
 
-class Logger:
+
+def log(log_text=None, **kwargs):
     '''
     Logs the given text and optional prefix to stdout (if quiet is False) and
-    to an optional log file. By default, we strip out newlines in order to
+    to an optional log file. By default, we strip out newlines in order to 
     print our lines correctly, but you can override this functionality if you
     want to print multi-line output.
     '''
 
-    def __init__(self):
-        self.fileLogger = FileLogger()
-
-    def log(self, log_text=None, squash=True, **kwargs):
-        # set up some defaults
-        color = kwargs.get('color', '')
-        color_reset = Style.RESET_ALL if color else ''
-        prefix = kwargs.get('prefix', '')
-        border = kwargs.get('border')
-        border_bottom = kwargs.get('border_bottom')
-        file = kwargs.get('file')
-        quiet = kwargs.get('quiet')
-
-        if border is not None:
-            border = color + (border * 80) + os.linesep + color_reset
-            border_bottom = border if border_bottom is None else \
-                color + (border_bottom * 80) + os.linesep + color_reset
-        elif not log_text:
-            return
-
-        try:
-            new_log_text = border or ''
-            for line in log_text.splitlines():
-                if line.strip() is not '':
-                    if prefix:
-                        new_log_text += Style.DIM + prefix + Style.RESET_ALL
-                    new_log_text += color + line + color_reset + os.linesep
-            new_log_text += border_bottom or ''
-
-            if not quiet:
-                sys.stdout.write(Style.RESET_ALL + new_log_text)
-                sys.stdout.flush()
-
-            if file is not None:
-                self.fileLogger.log(file, new_log_text, squash)
-
-        except:
-            pass
-
-class FileLogger:
-    '''
-    Logs text to a file
-    '''
-
-    def __init__(self):
-        self.prev_text_count = 0
-        self.prev_text = ''
-
-    def write_to_file(self, file, text):
-        if os.fstat(file.fileno()).st_size < TOO_MANY_BYTES:
-            file.write(seq.sub('', text))
+    # set up some defaults
+    color = kwargs.get('color', '')
+    color_reset = Style.RESET_ALL if color else ''
+    prefix = kwargs.get('prefix', '')
+    border = kwargs.get('border')
+    border_bottom = kwargs.get('border_bottom')
+    file = kwargs.get('file')
+    quiet = kwargs.get('quiet')
+
+    if border is not None:
+        border = color + (border * 80) + os.linesep + color_reset
+        border_bottom = border if border_bottom is None else \
+            color + (border_bottom * 80) + os.linesep + color_reset
+    elif not log_text:
+        return
+
+    try:
+        new_log_text = border or ''
+        for line in log_text.splitlines():
+            if line.strip() is not '':
+                if prefix:
+                    new_log_text += Style.DIM + prefix + Style.RESET_ALL
+                new_log_text += color + line + color_reset + os.linesep
+        new_log_text += border_bottom or ''
+
+        if not quiet:
+            sys.stdout.write(Style.RESET_ALL + new_log_text)
+            sys.stdout.flush()
+
+        if file is not None and os.fstat(
+                file.fileno()).st_size < TOO_MANY_BYTES:
+            file.write(seq.sub('', new_log_text))
             file.flush()
+    except:
+        pass
 
-    def write_prev_text(self, file):
-        text = self.prev_text
-        if self.prev_text_count > 1:
-            text = '[%s]: %s' % (self.prev_text_count, self.prev_text)
-        self.write_to_file(file, text)
-
-    def log(self, file, text, squash):
-        if not squash:
-            # If we're not squashing make sure there's no prev text
-            # to flush out
-            if self.prev_text_count > 0:
-                self.write_prev_text(file)
-                self.prev_text_count = 0
-                self.prev_text = ''
-            # Then write the text we're not squashing
-            self.write_to_file(file, text)
-        # If we have matching lines, increase the counter without
-        # writing anything to file
-        elif self.prev_text and self.prev_text == text:
-            self.prev_text_count += 1
-        # If we get here, we don't have matching lines. Write the
-        # previous text and store the current text
-        elif self.prev_text_count > 0:
-            self.write_prev_text(file)
-            self.prev_text = text
-            self.prev_text_count = 1
-        else:
-            self.prev_text = text
-            self.prev_text_count = 1
 
 class QuietOutputStream:
     '''
-    Provides an output stream which either writes to stdout or nothing
+    Provides an output stream which either writes to stdout or nothing 
     depending on the is_quiet param.
     '''
 

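The behavioral core of the revert sits in this file: the removed FileLogger collapsed consecutive identical lines into a single "[count]: line" entry before writing, while the restored log() writes every line to the file verbatim (still capped by the TOO_MANY_BYTES size check). A minimal sketch, for comparison only, of the squashing behavior that goes away:

# Sketch of the duplicate-squashing the removed FileLogger performed;
# after this revert, log() writes each line as-is.
def squash_lines(lines):
    out, prev, count = [], None, 0
    for text in lines:
        if text == prev:
            count += 1
        else:
            if prev is not None:
                out.append(prev if count == 1 else '[%s]: %s' % (count, prev))
            prev, count = text, 1
    if prev is not None:
        out.append(prev if count == 1 else '[%s]: %s' % (count, prev))
    return out

# e.g. squash_lines(['a', 'a', 'a', 'b']) -> ['[3]: a', 'b']
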
+ 14 - 12
toolset/utils/results.py

@@ -1,3 +1,5 @@
+from toolset.utils.output_helper import log
+
 import os
 import subprocess
 import uuid
@@ -8,6 +10,7 @@ import threading
 import re
 import math
 import csv
+import traceback
 from datetime import datetime
 
 # Cross-platform colored text
@@ -20,7 +23,6 @@ class Results:
         Constructor
         '''
         self.benchmarker = benchmarker
-        self.log = benchmarker.log
         self.config = benchmarker.config
         self.directory = os.path.join(self.config.results_root,
                                       self.config.timestamp)
@@ -205,7 +207,7 @@ class Results:
                     headers={'Content-Type': 'application/json'},
                     data=json.dumps(self.__to_jsonable(), indent=2))
             except Exception:
-                self.log("Error uploading results.json")
+                log("Error uploading results.json")
 
     def load(self):
         '''
@@ -282,12 +284,12 @@ class Results:
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
-            self.log("Verification Summary",
+            log("Verification Summary",
                 border='=',
                 border_bottom='-',
                 color=Fore.CYAN)
             for test in self.benchmarker.tests:
-                self.log(Fore.CYAN + "| {!s}".format(test.name))
+                log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                     for test_type, result in self.verify[
                             test.name].iteritems():
@@ -297,14 +299,14 @@ class Results:
                             color = Fore.YELLOW
                         else:
                             color = Fore.RED
-                        self.log(Fore.CYAN + "|       " + test_type.ljust(13) +
+                        log(Fore.CYAN + "|       " + test_type.ljust(13) +
                             ' : ' + color + result.upper())
                 else:
-                    self.log(Fore.CYAN + "|      " + Fore.RED +
+                    log(Fore.CYAN + "|      " + Fore.RED +
                         "NO RESULTS (Did framework launch?)")
-            self.log('', border='=', border_bottom='', color=Fore.CYAN)
+            log('', border='=', border_bottom='', color=Fore.CYAN)
 
-        self.log("Results are saved in " + self.directory)
+        log("Results are saved in " + self.directory)
 
     #############################################################################
     # PRIVATE FUNCTIONS
@@ -341,7 +343,7 @@ class Results:
             with open(self.file, 'w') as f:
                 f.write(json.dumps(self.__to_jsonable(), indent=2))
         except IOError:
-            self.log("Error writing results.json")
+            log("Error writing results.json")
 
     def __count_sloc(self):
         '''
@@ -362,15 +364,15 @@ class Results:
             # one file listed.
             command = "cloc --yaml --follow-links . | grep code | tail -1 | cut -d: -f 2"
 
-            self.log("Running \"%s\" (cwd=%s)" % (command, wd))
+            log("Running \"%s\" (cwd=%s)" % (command, wd))
             try:
                 line_count = int(subprocess.check_output(command, cwd=wd, shell=True))
             except (subprocess.CalledProcessError, ValueError) as e:
-                self.log("Unable to count lines of code for %s due to error '%s'" %
+                log("Unable to count lines of code for %s due to error '%s'" %
                     (framework, e))
                 continue
 
-            self.log("Counted %s lines of code" % line_count)
+            log("Counted %s lines of code" % line_count)
             framework_to_count[framework] = line_count
 
         self.rawData['slocCounts'] = framework_to_count

+ 15 - 14
toolset/utils/time_logger.py

@@ -1,6 +1,8 @@
 import time
 from colorama import Fore
 
+from toolset.utils.output_helper import log
+
 
 class TimeLogger:
     '''
@@ -8,10 +10,9 @@ class TimeLogger:
     for suite actions
     '''
 
-    def __init__(self, config):
-        self.log = config.log
-
+    def __init__(self):
         self.start = time.time()
+
         self.benchmarking_start = 0
         self.benchmarking_total = 0
         self.database_starting = 0
@@ -46,7 +47,7 @@ class TimeLogger:
         self.database_started = int(time.time() - self.database_starting)
 
     def log_database_start_time(self, log_prefix, file):
-        self.log("Time starting database: %s" % TimeLogger.output(
+        log("Time starting database: %s" % TimeLogger.output(
             self.database_started),
             prefix=log_prefix,
             file=file,
@@ -58,7 +59,7 @@ class TimeLogger:
     def log_benchmarking_end(self, log_prefix, file):
         total = int(time.time() - self.benchmarking_start)
         self.benchmarking_total = self.benchmarking_total + total
-        self.log("Benchmarking time: %s" % TimeLogger.output(total),
+        log("Benchmarking time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
@@ -74,11 +75,11 @@ class TimeLogger:
         self.build_total = self.build_total + total
         log_str = "Build time: %s" % TimeLogger.output(total)
         self.build_logs.append({'log_prefix': log_prefix, 'str': log_str})
-        self.log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)
+        log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)
 
     def log_build_flush(self, file):
         for b_log in self.build_logs:
-            self.log(b_log['str'],
+            log(b_log['str'],
                 prefix=b_log['log_prefix'],
                 file=file,
                 color=Fore.YELLOW)
@@ -91,7 +92,7 @@ class TimeLogger:
         self.accepting_requests = int(time.time() - self.test_started)
 
     def log_test_accepting_requests(self, log_prefix, file):
-        self.log("Time until accepting requests: %s" % TimeLogger.output(
+        log("Time until accepting requests: %s" % TimeLogger.output(
             self.accepting_requests),
             prefix=log_prefix,
             file=file,
@@ -102,28 +103,28 @@ class TimeLogger:
 
     def log_test_end(self, log_prefix, file):
         total = int(time.time() - self.test_start)
-        self.log("Total test time: %s" % TimeLogger.output(total),
+        log("Total test time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
-        self.log("Total time building so far: %s" % TimeLogger.output(
+        log("Total time building so far: %s" % TimeLogger.output(
             self.build_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
-        self.log("Total time verifying so far: %s" % TimeLogger.output(
+        log("Total time verifying so far: %s" % TimeLogger.output(
             self.verify_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
         if self.benchmarking_total > 0:
-            self.log("Total time benchmarking so far: %s" % TimeLogger.output(
+            log("Total time benchmarking so far: %s" % TimeLogger.output(
                 self.benchmarking_total),
                 prefix="tfb: ",
                 file=file,
                 color=Fore.YELLOW)
         running_time = int(time.time() - self.start)
-        self.log("Total execution time so far: %s" %
+        log("Total execution time so far: %s" %
             TimeLogger.output(running_time),
             prefix="tfb: ",
             file=file,
@@ -135,7 +136,7 @@ class TimeLogger:
     def log_verify_end(self, log_prefix, file):
         total = int(time.time() - self.verify_start)
         self.verify_total = self.verify_total + total
-        self.log("Verify time: %s" % TimeLogger.output(total),
+        log("Verify time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
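
After this change TimeLogger is constructed without a config and its reporting methods call the module-level log() directly. A short usage sketch matching the constructor and methods shown above; the prefix string and log file are placeholders.

# Usage sketch for the reverted TimeLogger (placeholders for illustration).
from toolset.utils.time_logger import TimeLogger

timer = TimeLogger()
with open('benchmark.log', 'w') as benchmark_log:
    timer.mark_verify_start()
    # ... run verification ...
    timer.log_verify_end(log_prefix="tfb: ", file=benchmark_log)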