Browse Source

Revert "Squash duplicate lines in logs when writing to file" (#4584)

Nate 6 years ago
parent
commit
74a2e06e25

+ 11 - 0
frameworks/JavaScript/nodejs/mitol.dockerfile

@@ -0,0 +1,11 @@
+FROM node:10.12.0
+
+COPY ./ ./
+
+RUN apt install -yqq wget
+RUN npm install
+RUN mkdir -p node_modules/mns & mkdir -p tmp
+RUN wget -q https://github.com/Helidium/Mitol/archive/v0.0.1.tar.gz -P tmp
+RUN tar -xzvf tmp/v0.0.1.tar.gz -C tmp
+RUN make -C tmp/Mitol-0.0.1/node
+RUN cp tmp/Mitol-0.0.1/node/dist/* node_modules/mns

+ 14 - 15
toolset/benchmark/benchmarker.py

@@ -1,4 +1,4 @@
-from toolset.utils.output_helper import FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils.docker_helper import DockerHelper
 from toolset.utils.time_logger import TimeLogger
 from toolset.utils.metadata import Metadata
@@ -23,8 +23,7 @@ class Benchmarker:
         Initialize the benchmarker.
         '''
         self.config = config
-        self.log = config.log
-        self.time_logger = TimeLogger(config)
+        self.time_logger = TimeLogger()
         self.metadata = Metadata(self)
         self.audit = Audit(self)

@@ -50,7 +49,7 @@ class Benchmarker:

         any_failed = False
         # Run tests
-        self.log("Running Tests...", border='=')
+        log("Running Tests...", border='=')

         # build wrk and all databases needed for current run
         self.docker_helper.build_wrk()
@@ -59,7 +58,7 @@ class Benchmarker:
         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
             for test in self.tests:
-                self.log("Running Test: %s" % test.name, border='-')
+                log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
                     if not self.__run_test(test, benchmark_log):
                         any_failed = True
@@ -68,7 +67,7 @@ class Benchmarker:

         # Parse results
         if self.config.mode == "benchmark":
-            self.log("Parsing Results ...", border='=')
+            log("Parsing Results ...", border='=')
             self.results.parse(self.tests)

         self.results.set_completion_time()
@@ -78,7 +77,7 @@ class Benchmarker:
         return any_failed

     def stop(self, signal=None, frame=None):
-        self.log("Shutting down (may take a moment)")
+        log("Shutting down (may take a moment)")
         self.docker_helper.stop()
         sys.exit(0)

@@ -88,7 +87,7 @@ class Benchmarker:

     def __exit_test(self, success, prefix, file, message=None):
         if message:
-            self.log(message,
+            log(message,
                 prefix=prefix,
                 file=file,
                 color=Fore.RED if success else '')
@@ -173,7 +172,7 @@ class Benchmarker:

             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
-                self.log("Entering debug mode. Server has started. CTRL-c to stop.",
+                log("Entering debug mode. Server has started. CTRL-c to stop.",
                     prefix=log_prefix,
                     file=benchmark_log,
                     color=Fore.YELLOW)
@@ -181,14 +180,14 @@ class Benchmarker:
                     time.sleep(1)

             # Verify URLs and audit
-            self.log("Verifying framework URLs", prefix=log_prefix)
+            log("Verifying framework URLs", prefix=log_prefix)
             self.time_logger.mark_verify_start()
             passed_verify = test.verify_urls()
             self.audit.audit_test_dir(test.directory)

             # Benchmark this test
             if self.config.mode == "benchmark":
-                self.log("Benchmarking %s" % test.name,
+                log("Benchmarking %s" % test.name,
                     file=benchmark_log,
                     border='-')
                 self.time_logger.mark_benchmarking_start()
@@ -225,7 +224,7 @@ class Benchmarker:
             tb = traceback.format_exc()
             self.results.write_intermediate(test.name,
                                             "error during test: " + str(e))
-            self.log(tb, prefix=log_prefix, file=benchmark_log)
+            log(tb, prefix=log_prefix, file=benchmark_log)
             return self.__exit_test(
                 success=False,
                 message="Error during test: %s" % test.name,
@@ -241,7 +240,7 @@ class Benchmarker:
         '''

         def benchmark_type(test_type):
-            self.log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
+            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

             test = framework_test.runTests[test_type]
             raw_file = self.results.get_raw_file(framework_test.name,
@@ -269,13 +268,13 @@ class Benchmarker:
                 self.__end_logging()

             results = self.results.parse_test(framework_test, test_type)
-            self.log("Benchmark results:", file=benchmark_log)
+            log("Benchmark results:", file=benchmark_log)
             # TODO move into log somehow
             pprint(results)

             self.results.report_benchmark_results(framework_test, test_type,
                                                   results['results'])
-            self.log("Complete", file=benchmark_log)
+            log("Complete", file=benchmark_log)

         for test_type in framework_test.runTests:
             benchmark_type(test_type)

+ 4 - 3
toolset/benchmark/fortune_html_parser.py

@@ -4,11 +4,12 @@ import os
 from HTMLParser import HTMLParser
 from difflib import unified_diff

+from toolset.utils.output_helper import log
+

 class FortuneHTMLParser(HTMLParser):
-    def __init__(self, config):
+    def __init__(self):
         HTMLParser.__init__(self)
-        self.log = config.log
         self.body = []

     valid_fortune = '''<!doctype html><html>
@@ -184,5 +185,5 @@ class FortuneHTMLParser(HTMLParser):
                 headers_left -= 1
                 if headers_left <= 0:
                     output += os.linesep
-            self.log(output, prefix="%s: " % name)
+            log(output, prefix="%s: " % name)
         return (same, diff_lines)

+ 9 - 8
toolset/benchmark/framework_test.py

@@ -2,6 +2,8 @@ import os
 import traceback
 from requests import ConnectionError, Timeout

+from toolset.utils.output_helper import log
+
 # Cross-platform colored text
 from colorama import Fore, Style

@@ -30,7 +32,6 @@ class FrameworkTest:
         self.notes = ""
         self.port = ""
         self.versus = ""
-        self.log = benchmarker.log

         self.__dict__.update(args)

@@ -96,7 +97,7 @@ class FrameworkTest:
             with open(os.path.join(verificationPath, 'verification.txt'),
                       'w') as verification:
                 test = self.runTests[test_type]
-                self.log("VERIFYING %s" % test_type.upper(),
+                log("VERIFYING %s" % test_type.upper(),
                     file=verification,
                     border='-',
                     color=Fore.WHITE + Style.BRIGHT)
@@ -120,13 +121,13 @@ class FrameworkTest:
                 except ConnectionError as e:
                     results = [('fail', "Server did not respond to request",
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                 except Timeout as e:
                     results = [('fail', "Connection to server timed out",
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                 except Exception as e:
@@ -135,7 +136,7 @@ class FrameworkTest:
             but also that you have found a bug. Please submit an issue
             including this message: %s\n%s""" % (e, traceback.format_exc()),
                                 base_url)]
-                    self.log("Verifying test %s for %s caused an exception: %s" %
+                    log("Verifying test %s for %s caused an exception: %s" %
                         (test_type, self.name, e),
                         color=Fore.RED)
                     traceback.format_exc()
@@ -155,14 +156,14 @@ class FrameworkTest:
                     elif result.upper() == "FAIL":
                         color = Fore.RED

-                    self.log("   {!s}{!s}{!s} for {!s}".format(
+                    log("   {!s}{!s}{!s} for {!s}".format(
                         color, result.upper(), Style.RESET_ALL, url),
                         file=verification)
                     if reason is not None and len(reason) != 0:
                         for line in reason.splitlines():
-                            self.log("     " + line, file=verification)
+                            log("     " + line, file=verification)
                         if not test.passed:
-                            self.log("     See {!s}".format(specific_rules_url),
+                            log("     See {!s}".format(specific_rules_url),
                                 file=verification)

                 [output_result(r1, r2, url) for (r1, r2, url) in results]

+ 1 - 2
toolset/benchmark/test_types/fortune_type.py

@@ -5,7 +5,6 @@ from toolset.benchmark.test_types.verifications import basic_body_verification,

 class FortuneTestType(FrameworkTestType):
     def __init__(self, config):
-        self.config = config
         self.fortune_url = ""
         kwargs = {
             'name': 'fortune',
@@ -33,7 +32,7 @@ class FortuneTestType(FrameworkTestType):
         if len(problems) > 0:
             return problems

-        parser = FortuneHTMLParser(self.config)
+        parser = FortuneHTMLParser()
         parser.feed(body)
         (valid, diff) = parser.isValidFortune(self.name, body)


+ 11 - 10
toolset/benchmark/test_types/framework_test_type.py

@@ -1,4 +1,5 @@
 import copy
+import sys
 import json
 import requests
 import MySQLdb
@@ -7,6 +8,7 @@ import pymongo
 import traceback

 from colorama import Fore
+from toolset.utils.output_helper import log


 class FrameworkTestType:
@@ -27,7 +29,6 @@ class FrameworkTestType:
                  accept_header=None,
                  args=[]):
         self.config = config
-        self.log = config.log
         self.name = name
         self.requires_db = requires_db
         self.args = args
@@ -73,7 +74,7 @@ class FrameworkTestType:
         Downloads a URL and returns the HTTP response headers
         and body content as a tuple
         '''
-        self.log("Accessing URL {!s}: ".format(url), color=Fore.CYAN)
+        log("Accessing URL {!s}: ".format(url), color=Fore.CYAN)

         headers = {'Accept': self.accept_header}
         r = requests.get(url, timeout=15, headers=headers)
@@ -83,8 +84,8 @@ class FrameworkTestType:
         return self.headers, self.body

     def output_headers_and_body(self):
-        self.log(str(self.headers))
-        self.log(self.body)
+        log(str(self.headers))
+        log(self.body)

     def verify(self, base_url):
         '''
@@ -160,9 +161,9 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current MySQL World table.",
+                log("ERROR: Unable to load current MySQL World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         elif database_name == "postgres":
             try:
                 db = psycopg2.connect(
@@ -182,9 +183,9 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current Postgres World table.",
+                log("ERROR: Unable to load current Postgres World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         elif database_name == "mongodb":
             try:
                 worlds_json = {}
@@ -204,9 +205,9 @@ class FrameworkTestType:
                 connection.close()
             except Exception:
                 tb = traceback.format_exc()
-                self.log("ERROR: Unable to load current MongoDB World table.",
+                log("ERROR: Unable to load current MongoDB World table.",
                     color=Fore.RED)
-                self.log(tb)
+                log(tb)
         else:
             raise ValueError(
                 "Database: {!s} does not exist".format(database_name))

+ 3 - 2
toolset/benchmark/test_types/verifications.py

@@ -3,7 +3,7 @@ import re
 import traceback

 from datetime import datetime
-from toolset.utils.output_helper import Logger
+from toolset.utils.output_helper import log
 from time import sleep

 def basic_body_verification(body, url, is_json_check=True):
@@ -71,6 +71,7 @@ def verify_headers(request_headers_and_body, headers, url, should_be='json'):
     # Make sure that the date object isn't cached
     sleep(3)
     second_headers, body2 = request_headers_and_body(url)
+    second_date = second_headers.get('Date')

     date2 = second_headers.get('Date')
     if date == date2:
@@ -284,7 +285,7 @@ def verify_updates(old_worlds, new_worlds, updates_expected, url):
                         successful_updates += 1
             except Exception:
                 tb = traceback.format_exc()
-                Logger.log(tb, squash=False)
+                log(tb)
         n += 1

     if successful_updates == 0:

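For reference, a minimal standalone sketch (not the project's code) of the idea behind the Date-header check that the verify_headers hunk above touches: the suite requests the same URL twice a few seconds apart and treats identical Date headers as a sign the response is being cached. The endpoint below is hypothetical.

    import time

    import requests

    def date_header_changes(url):
        # verify_headers() sleeps 3 seconds between the two samples before
        # comparing the Date headers; identical values suggest caching.
        first = requests.get(url, timeout=15).headers.get('Date')
        time.sleep(3)
        second = requests.get(url, timeout=15).headers.get('Date')
        return first != second

    if __name__ == '__main__':
        # Hypothetical endpoint; any framework test URL would do.
        print(date_header_changes('http://localhost:8080/json'))
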
+ 4 - 3
toolset/run-tests.py

@@ -8,6 +8,7 @@ from toolset.utils.scaffolding import Scaffolding
 from toolset.utils.audit import Audit
 from toolset.utils import cleaner
 from toolset.utils.benchmark_config import BenchmarkConfig
+from toolset.utils.output_helper import log

 # Enable cross-platform colored output
 from colorama import init, Fore
@@ -211,7 +212,7 @@ def main(argv=None):
             all_tests = benchmarker.metadata.gather_tests()

             for test in all_tests:
-                config.log(test.name)
+                log(test.name)

         elif config.parse:
             all_tests = benchmarker.metadata.gather_tests()
@@ -227,8 +228,8 @@ def main(argv=None):
                 return any_failed
     except Exception:
         tb = traceback.format_exc()
-        config.log("A fatal error has occurred", color=Fore.RED)
-        config.log(tb)
+        log("A fatal error has occurred", color=Fore.RED)
+        log(tb)
         # try one last time to stop docker containers on fatal error
         try:
             benchmarker.stop()

+ 5 - 5
toolset/utils/audit.py

@@ -1,3 +1,4 @@
+from toolset.utils.output_helper import log
 from colorama import Fore


@@ -8,7 +9,6 @@ class Audit:

     def __init__(self, benchmarker):
         self.benchmarker = benchmarker
-        self.log = benchmarker.config.log

     def start_audit(self):
         for lang in self.benchmarker.metadata.gather_languages():
@@ -18,13 +18,13 @@ class Audit:

     def audit_test_dir(self, test_dir):
         warnings = 0
-        self.log('Auditing %s:' % test_dir, color=Fore.BLUE)
+        log('Auditing %s:' % test_dir, color=Fore.BLUE)

         if not self.benchmarker.metadata.has_file(test_dir, 'README.md'):
-            self.log('README.md file is missing')
+            log('README.md file is missing')
             warnings += 1

         if warnings:
-            self.log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
+            log('(%s) warning(s)' % warnings, color=Fore.YELLOW)
         else:
-            self.log('No problems to report', color=Fore.GREEN)
+            log('No problems to report', color=Fore.GREEN)

+ 1 - 3
toolset/utils/benchmark_config.py

@@ -1,5 +1,5 @@
 from toolset.benchmark.test_types import *
-from toolset.utils.output_helper import Logger, QuietOutputStream
+from toolset.utils.output_helper import QuietOutputStream

 import os
 import time
@@ -11,8 +11,6 @@ class BenchmarkConfig:
         Configures this BenchmarkConfig given the arguments provided.
         '''

-        self.log = Logger().log
-
         # Map type strings to their objects
         types = dict()
         types['json'] = JsonTestType(self)

+ 11 - 17
toolset/utils/docker_helper.py

@@ -8,6 +8,7 @@ import traceback
 from threading import Thread
 from colorama import Fore, Style

+from toolset.utils.output_helper import log
 from toolset.utils.database_helper import test_database

 from psutil import virtual_memory
@@ -18,7 +19,6 @@ mem_limit = int(round(virtual_memory().total * .95))
 class DockerHelper:
     def __init__(self, benchmarker=None):
         self.benchmarker = benchmarker
-        self.log = benchmarker.log

         self.client = docker.DockerClient(
             base_url=self.benchmarker.config.client_docker_host)
@@ -59,7 +59,7 @@ class DockerHelper:
                         index = buffer.index("\n")
                         line = buffer[:index]
                         buffer = buffer[index + 1:]
-                        self.log(line,
+                        log(line,
                             prefix=log_prefix,
                             file=build_log,
                             color=Fore.WHITE + Style.BRIGHT \
@@ -67,28 +67,25 @@ class DockerHelper:
                     # Kill docker builds if they exceed 60 mins. This will only
                     # catch builds that are still printing output.
                     if self.benchmarker.time_logger.time_since_start() > 3600:
-                        self.log("Build time exceeded 60 minutes",
+                        log("Build time exceeded 60 minutes",
                             prefix=log_prefix,
                             file=build_log,
                             color=Fore.RED)
                         raise Exception

                 if buffer:
-                    self.log(buffer,
+                    log(buffer,
                         prefix=log_prefix,
                         file=build_log,
                         color=Fore.WHITE + Style.BRIGHT \
                             if re.match(r'^Step \d+\/\d+', buffer) else '')
             except Exception:
                 tb = traceback.format_exc()
-                self.log("Docker build failed; terminating",
+                log("Docker build failed; terminating",
                     prefix=log_prefix,
                     file=build_log,
                     color=Fore.RED)
-                self.log(tb,
-                    squash=False,
-                    prefix=log_prefix,
-                    file=build_log)
+                log(tb, prefix=log_prefix, file=build_log)
                 self.benchmarker.time_logger.log_build_end(
                     log_prefix=log_prefix, file=build_log)
                 raise
@@ -173,7 +170,7 @@ class DockerHelper:
                             run_log_dir, "%s.log" % docker_file.replace(
                                 ".dockerfile", "").lower()), 'w') as run_log:
                     for line in docker_container.logs(stream=True):
-                        self.log(line, prefix=log_prefix, file=run_log)
+                        log(line, prefix=log_prefix, file=run_log)

             extra_hosts = None
             name = "tfb-server"
@@ -236,14 +233,11 @@ class DockerHelper:
                     os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                     'w') as run_log:
                 tb = traceback.format_exc()
-                self.log("Running docker container: %s.dockerfile failed" %
+                log("Running docker container: %s.dockerfile failed" %
                     test.name,
                     prefix=log_prefix,
                     file=run_log)
-                self.log(tb,
-                    squash=False,
-                    prefix=log_prefix,
-                    file=run_log)
+                log(tb, prefix=log_prefix, file=run_log)

         return container

@@ -348,7 +342,7 @@ class DockerHelper:
             database_ready = test_database(self.benchmarker.config, database)

         if not database_ready:
-            self.log("Database was not ready after startup", prefix=log_prefix)
+            log("Database was not ready after startup", prefix=log_prefix)

         return container

@@ -400,7 +394,7 @@ class DockerHelper:
         def watch_container(container):
             with open(raw_file, 'w') as benchmark_file:
                 for line in container.logs(stream=True):
-                    self.log(line, file=benchmark_file)
+                    log(line, file=benchmark_file)

         sysctl = {'net.core.somaxconn': 65535}


+ 4 - 5
toolset/utils/metadata.py

@@ -4,6 +4,7 @@ import json

 from collections import OrderedDict

+from toolset.utils.output_helper import log
 from colorama import Fore


@@ -19,7 +20,6 @@ class Metadata:

     def __init__(self, benchmarker=None):
         self.benchmarker = benchmarker
-        self.log = benchmarker.log

     def gather_languages(self):
         '''
@@ -99,8 +99,7 @@ class Metadata:
                 try:
                     config = json.load(config_file)
                 except ValueError:
-                    self.log("Error loading config: {!s}".format(config_file_name),
-                        squash=False,
+                    log("Error loading config: {!s}".format(config_file_name),
                         color=Fore.RED)
                     raise Exception("Error loading config file")

@@ -183,7 +182,7 @@ class Metadata:
             tests_to_run = [name for (name, keys) in test.iteritems()]

             if "default" not in tests_to_run:
-                self.log("Framework %s does not define a default test in benchmark_config.json"
+                log("Framework %s does not define a default test in benchmark_config.json"
                     % config['framework'],
                     color=Fore.YELLOW)

@@ -206,7 +205,7 @@ class Metadata:
                         # This is quite common - most tests don't support all types
                         # Quitely log it and move on (debug logging is on in travis and this causes
                         # ~1500 lines of debug, so I'm totally ignoring it for now
-                        # self.log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
+                        # log("Missing arguments for test type %s for framework test %s" % (type_name, test_name))
                         pass

                 # We need to sort by test_type to run

+ 38 - 83
toolset/utils/output_helper.py

@@ -15,100 +15,55 @@ FNULL = open(os.devnull, 'w')
 # message endlessly anyway.
 TOO_MANY_BYTES = 50 * 1024 * 1024

-class Logger:
+
+def log(log_text=None, **kwargs):
     '''
     Logs the given text and optional prefix to stdout (if quiet is False) and
-    to an optional log file. By default, we strip out newlines in order to
+    to an optional log file. By default, we strip out newlines in order to 
     print our lines correctly, but you can override this functionality if you
     want to print multi-line output.
     '''

-    def __init__(self):
-        self.fileLogger = FileLogger()
-
-    def log(self, log_text=None, squash=True, **kwargs):
-        # set up some defaults
-        color = kwargs.get('color', '')
-        color_reset = Style.RESET_ALL if color else ''
-        prefix = kwargs.get('prefix', '')
-        border = kwargs.get('border')
-        border_bottom = kwargs.get('border_bottom')
-        file = kwargs.get('file')
-        quiet = kwargs.get('quiet')
-
-        if border is not None:
-            border = color + (border * 80) + os.linesep + color_reset
-            border_bottom = border if border_bottom is None else \
-                color + (border_bottom * 80) + os.linesep + color_reset
-        elif not log_text:
-            return
-
-        try:
-            new_log_text = border or ''
-            for line in log_text.splitlines():
-                if line.strip() is not '':
-                    if prefix:
-                        new_log_text += Style.DIM + prefix + Style.RESET_ALL
-                    new_log_text += color + line + color_reset + os.linesep
-            new_log_text += border_bottom or ''
-
-            if not quiet:
-                sys.stdout.write(Style.RESET_ALL + new_log_text)
-                sys.stdout.flush()
-
-            if file is not None:
-                self.fileLogger.log(file, new_log_text, squash)
-
-        except:
-            pass
-
-class FileLogger:
-    '''
-    Logs text to a file
-    '''
-
-    def __init__(self):
-        self.prev_text_count = 0
-        self.prev_text = ''
-
-    def write_to_file(self, file, text):
-        if os.fstat(file.fileno()).st_size < TOO_MANY_BYTES:
-            file.write(seq.sub('', text))
+    # set up some defaults
+    color = kwargs.get('color', '')
+    color_reset = Style.RESET_ALL if color else ''
+    prefix = kwargs.get('prefix', '')
+    border = kwargs.get('border')
+    border_bottom = kwargs.get('border_bottom')
+    file = kwargs.get('file')
+    quiet = kwargs.get('quiet')
+
+    if border is not None:
+        border = color + (border * 80) + os.linesep + color_reset
+        border_bottom = border if border_bottom is None else \
+            color + (border_bottom * 80) + os.linesep + color_reset
+    elif not log_text:
+        return
+
+    try:
+        new_log_text = border or ''
+        for line in log_text.splitlines():
+            if line.strip() is not '':
+                if prefix:
+                    new_log_text += Style.DIM + prefix + Style.RESET_ALL
+                new_log_text += color + line + color_reset + os.linesep
+        new_log_text += border_bottom or ''
+
+        if not quiet:
+            sys.stdout.write(Style.RESET_ALL + new_log_text)
+            sys.stdout.flush()
+
+        if file is not None and os.fstat(
+                file.fileno()).st_size < TOO_MANY_BYTES:
+            file.write(seq.sub('', new_log_text))
             file.flush()
+    except:
+        pass

-    def write_prev_text(self, file):
-        text = self.prev_text
-        if self.prev_text_count > 1:
-            text = '[%s]: %s' % (self.prev_text_count, self.prev_text)
-        self.write_to_file(file, text)
-
-    def log(self, file, text, squash):
-        if not squash:
-            # If we're not squashing make sure there's no prev text
-            # to flush out
-            if self.prev_text_count > 0:
-                self.write_prev_text(file)
-                self.prev_text_count = 0
-                self.prev_text = ''
-            # Then write the text we're not squashing
-            self.write_to_file(file, text)
-        # If we have matching lines, increase the counter without
-        # writing anything to file
-        elif self.prev_text and self.prev_text == text:
-            self.prev_text_count += 1
-        # If we get here, we don't have matching lines. Write the
-        # previous text and store the current text
-        elif self.prev_text_count > 0:
-            self.write_prev_text(file)
-            self.prev_text = text
-            self.prev_text_count = 1
-        else:
-            self.prev_text = text
-            self.prev_text_count = 1

 class QuietOutputStream:
     '''
-    Provides an output stream which either writes to stdout or nothing
+    Provides an output stream which either writes to stdout or nothing 
     depending on the is_quiet param.
     '''


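To illustrate the restored API, here is a minimal usage sketch assembled from call sites touched in this diff (the file name and messages are illustrative; the keyword arguments are the ones log() reads via kwargs.get() above): callers import the module-level log helper instead of going through config.log, and with the FileLogger squashing removed, repeated lines are written to the log file verbatim.

    from colorama import Fore

    from toolset.utils.output_helper import log

    with open('benchmark.log', 'w') as benchmark_log:
        # border='=' prints a full-width rule around the message.
        log("Running Tests...", border='=')

        # A prefixed, colored line that is also mirrored into the log file
        # (capped at TOO_MANY_BYTES, with escape sequences stripped).
        log("Verifying framework URLs",
            prefix="tfb: ",
            file=benchmark_log,
            color=Fore.CYAN)

        # With the squashing behavior reverted, duplicates appear verbatim.
        log("illustrative duplicate line", file=benchmark_log)
        log("illustrative duplicate line", file=benchmark_log)
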
+ 14 - 12
toolset/utils/results.py

@@ -1,3 +1,5 @@
+from toolset.utils.output_helper import log
+
 import os
 import subprocess
 import uuid
@@ -8,6 +10,7 @@ import threading
 import re
 import math
 import csv
+import traceback
 from datetime import datetime

 # Cross-platform colored text
@@ -20,7 +23,6 @@ class Results:
         Constructor
         '''
         self.benchmarker = benchmarker
-        self.log = benchmarker.log
         self.config = benchmarker.config
         self.directory = os.path.join(self.config.results_root,
                                       self.config.timestamp)
@@ -205,7 +207,7 @@ class Results:
                     headers={'Content-Type': 'application/json'},
                     data=json.dumps(self.__to_jsonable(), indent=2))
             except Exception:
-                self.log("Error uploading results.json")
+                log("Error uploading results.json")

     def load(self):
         '''
@@ -282,12 +284,12 @@ class Results:
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
-            self.log("Verification Summary",
+            log("Verification Summary",
                 border='=',
                 border_bottom='-',
                 color=Fore.CYAN)
             for test in self.benchmarker.tests:
-                self.log(Fore.CYAN + "| {!s}".format(test.name))
+                log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                     for test_type, result in self.verify[
                             test.name].iteritems():
@@ -297,14 +299,14 @@ class Results:
                             color = Fore.YELLOW
                         else:
                             color = Fore.RED
-                        self.log(Fore.CYAN + "|       " + test_type.ljust(13) +
+                        log(Fore.CYAN + "|       " + test_type.ljust(13) +
                             ' : ' + color + result.upper())
                 else:
-                    self.log(Fore.CYAN + "|      " + Fore.RED +
+                    log(Fore.CYAN + "|      " + Fore.RED +
                         "NO RESULTS (Did framework launch?)")
-            self.log('', border='=', border_bottom='', color=Fore.CYAN)
+            log('', border='=', border_bottom='', color=Fore.CYAN)

-        self.log("Results are saved in " + self.directory)
+        log("Results are saved in " + self.directory)

     #############################################################################
     # PRIVATE FUNCTIONS
@@ -341,7 +343,7 @@ class Results:
             with open(self.file, 'w') as f:
                 f.write(json.dumps(self.__to_jsonable(), indent=2))
         except IOError:
-            self.log("Error writing results.json")
+            log("Error writing results.json")

     def __count_sloc(self):
         '''
@@ -362,15 +364,15 @@ class Results:
             # one file listed.
             command = "cloc --yaml --follow-links . | grep code | tail -1 | cut -d: -f 2"

-            self.log("Running \"%s\" (cwd=%s)" % (command, wd))
+            log("Running \"%s\" (cwd=%s)" % (command, wd))
             try:
                 line_count = int(subprocess.check_output(command, cwd=wd, shell=True))
             except (subprocess.CalledProcessError, ValueError) as e:
-                self.log("Unable to count lines of code for %s due to error '%s'" %
+                log("Unable to count lines of code for %s due to error '%s'" %
                     (framework, e))
                 continue

-            self.log("Counted %s lines of code" % line_count)
+            log("Counted %s lines of code" % line_count)
             framework_to_count[framework] = line_count

         self.rawData['slocCounts'] = framework_to_count

+ 15 - 14
toolset/utils/time_logger.py

@@ -1,6 +1,8 @@
 import time
 from colorama import Fore

+from toolset.utils.output_helper import log
+

 class TimeLogger:
     '''
@@ -8,10 +10,9 @@ class TimeLogger:
     for suite actions
     '''

-    def __init__(self, config):
-        self.log = config.log
-
+    def __init__(self):
         self.start = time.time()
+
         self.benchmarking_start = 0
         self.benchmarking_total = 0
         self.database_starting = 0
@@ -46,7 +47,7 @@ class TimeLogger:
         self.database_started = int(time.time() - self.database_starting)

     def log_database_start_time(self, log_prefix, file):
-        self.log("Time starting database: %s" % TimeLogger.output(
+        log("Time starting database: %s" % TimeLogger.output(
             self.database_started),
             prefix=log_prefix,
             file=file,
@@ -58,7 +59,7 @@ class TimeLogger:
     def log_benchmarking_end(self, log_prefix, file):
         total = int(time.time() - self.benchmarking_start)
         self.benchmarking_total = self.benchmarking_total + total
-        self.log("Benchmarking time: %s" % TimeLogger.output(total),
+        log("Benchmarking time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
@@ -74,11 +75,11 @@ class TimeLogger:
         self.build_total = self.build_total + total
         log_str = "Build time: %s" % TimeLogger.output(total)
         self.build_logs.append({'log_prefix': log_prefix, 'str': log_str})
-        self.log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)
+        log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)

     def log_build_flush(self, file):
         for b_log in self.build_logs:
-            self.log(b_log['str'],
+            log(b_log['str'],
                 prefix=b_log['log_prefix'],
                 file=file,
                 color=Fore.YELLOW)
@@ -91,7 +92,7 @@ class TimeLogger:
         self.accepting_requests = int(time.time() - self.test_started)

     def log_test_accepting_requests(self, log_prefix, file):
-        self.log("Time until accepting requests: %s" % TimeLogger.output(
+        log("Time until accepting requests: %s" % TimeLogger.output(
             self.accepting_requests),
             prefix=log_prefix,
             file=file,
@@ -102,28 +103,28 @@ class TimeLogger:

     def log_test_end(self, log_prefix, file):
         total = int(time.time() - self.test_start)
-        self.log("Total test time: %s" % TimeLogger.output(total),
+        log("Total test time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
-        self.log("Total time building so far: %s" % TimeLogger.output(
+        log("Total time building so far: %s" % TimeLogger.output(
             self.build_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
-        self.log("Total time verifying so far: %s" % TimeLogger.output(
+        log("Total time verifying so far: %s" % TimeLogger.output(
             self.verify_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
         if self.benchmarking_total > 0:
-            self.log("Total time benchmarking so far: %s" % TimeLogger.output(
+            log("Total time benchmarking so far: %s" % TimeLogger.output(
                 self.benchmarking_total),
                 prefix="tfb: ",
                 file=file,
                 color=Fore.YELLOW)
         running_time = int(time.time() - self.start)
-        self.log("Total execution time so far: %s" %
+        log("Total execution time so far: %s" %
             TimeLogger.output(running_time),
             prefix="tfb: ",
             file=file,
@@ -135,7 +136,7 @@ class TimeLogger:
     def log_verify_end(self, log_prefix, file):
         total = int(time.time() - self.verify_start)
         self.verify_total = self.verify_total + total
-        self.log("Verify time: %s" % TimeLogger.output(total),
+        log("Verify time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)