Docker logging multiple build lines (#3432)

Nate · 7 years ago
commit ad41d917aa

+ 28 - 28
toolset/benchmark/benchmarker.py

@@ -1,4 +1,4 @@
-from toolset.utils.output_helper import header, log, log_error, FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
 from toolset.utils import docker_helper
 
@@ -11,6 +11,8 @@ import json
 import shlex
 from pprint import pprint
 
+from colorama import Fore
+
 
 class Benchmarker:
     def __init__(self, config, results):
@@ -38,8 +40,8 @@ class Benchmarker:
         all_tests = gather_remaining_tests(self.config, self.results)
 
         # Setup client/server
-        header(
-            "Preparing Server, Database, and Client ...", top='=', bottom='=')
+        log(
+            "Preparing Server, Database, and Client ...", border='=')
         with self.config.quiet_out.enable():
             self.__setup_server()
             self.__setup_database()
@@ -47,11 +49,11 @@ class Benchmarker:
 
         # Run tests
         success = True
-        header("Running Tests...", top='=', bottom='=')
+        log("Running Tests...", border='=')
         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
             for test in all_tests:
-                header("Running Test: %s" % test.name)
+                log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
                     success = self.__run_test(test, benchmark_log) and success
                 # Load intermediate result from child process
@@ -59,7 +61,7 @@ class Benchmarker:
 
         # Parse results
         if self.config.mode == "benchmark":
-            header("Parsing Results ...", top='=', bottom='=')
+            log("Parsing Results ...", border='=')
             self.results.parse(all_tests)
 
         self.results.set_completion_time()
@@ -222,8 +224,9 @@ class Benchmarker:
                 # We gave it our all
                 self.results.write_intermediate(test.name, "port " + str(
                     test.port) + " is not available before start")
-                header("Error: Port %s is not available, cannot start %s" %
-                       (test.port, test.name), log_prefix, benchmark_log)
+                log("Error: Port %s is not available, cannot start %s" %
+                       (test.port, test.name),
+                    prefix=log_prefix, file=benchmark_log, color=Fore.RED)
                 return False
 
             # Start database container
@@ -234,7 +237,7 @@ class Benchmarker:
                     self.results.write_intermediate(test.name,
                                                     "ERROR: Problem starting")
                     log("ERROR: Problem building/running database container",
-                        log_prefix, benchmark_log)
+                        prefix=log_prefix, file=benchmark_log, color=Fore.RED)
                     return False
 
             # Start webapp
@@ -244,8 +247,7 @@ class Benchmarker:
                 self.results.write_intermediate(test.name,
                                                 "ERROR: Problem starting")
                 log("ERROR: Problem starting {name}".format(name=test.name),
-                    log_prefix,
-                    benchmark_log)
+                    prefix=log_prefix, file=benchmark_log, color=Fore.RED)
                 return False
 
             slept = 0
@@ -256,7 +258,7 @@ class Benchmarker:
                     docker_helper.stop(self.config, database_container_id,
                                        test)
                     log("ERROR: One or more expected docker container exited early",
-                        log_prefix, benchmark_log)
+                        prefix=log_prefix, file=benchmark_log, color=Fore.RED)
                     return False
                 time.sleep(1)
                 slept += 1
@@ -264,19 +266,18 @@ class Benchmarker:
             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
                 log("Entering debug mode. Server has started. CTRL-c to stop.",
-                    log_prefix, benchmark_log)
+                    prefix=log_prefix, file=benchmark_log, color=Fore.YELLOW)
                 while True:
                     time.sleep(1)
 
             # Verify URLs
-            log("Verifying framework URLs", log_prefix)
+            log("Verifying framework URLs", prefix=log_prefix)
             passed_verify = test.verify_urls()
 
             # Benchmark this test
             if self.config.mode == "benchmark":
-                header(
-                    message="Benchmarking %s" % test.name,
-                    log_file=benchmark_log)
+                log("Benchmarking %s" % test.name,
+                    file=benchmark_log, border='-')
                 self.__benchmark(test, benchmark_log)
 
             # Stop this test
@@ -290,9 +291,8 @@ class Benchmarker:
                     stderr=benchmark_log,
                     stdout=benchmark_log)
             except Exception:
-                header(
-                    message="Error: Could not empty /tmp",
-                    log_file=benchmark_log)
+                log("Error: Could not empty /tmp",
+                    file=benchmark_log, color=Fore.RED)
 
             # Save results thus far into the latest results directory
             self.results.write_intermediate(test.name,
@@ -304,7 +304,8 @@ class Benchmarker:
             self.results.upload()
 
             if self.config.mode == "verify" and not passed_verify:
-                log("Failed verify!", log_prefix, benchmark_log)
+                log("Failed verify!",
+                    prefix=log_prefix, file=benchmark_log, color=Fore.RED)
                 return False
         except KeyboardInterrupt:
             docker_helper.stop(self.config, database_container_id, test)
@@ -312,10 +313,9 @@ class Benchmarker:
             tb = traceback.format_exc()
             self.results.write_intermediate(
                 test.name, "error during test setup: " + str(e))
-            header(
-                message="Subprocess Error %s" % test.name,
-                log_file=benchmark_log)
-            log_error(tb, log_prefix, benchmark_log)
+            log("Subprocess Error %s" % test.name,
+                file=benchmark_log, border='-', color=Fore.RED)
+            log(tb, prefix=log_prefix, file=benchmark_log)
             return False
 
         return True
@@ -326,7 +326,7 @@ class Benchmarker:
         '''
 
         def benchmark_type(test_type):
-            log("BENCHMARKING %s ... " % test_type.upper(), benchmark_log)
+            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
 
             test = framework_test.runTests[test_type]
             test.setup_out(benchmark_log)
@@ -358,13 +358,13 @@ class Benchmarker:
                 self.__end_logging()
 
             results = self.results.parse_test(framework_test, test_type)
-            log("Benchmark results:", benchmark_log)
+            log("Benchmark results:", file=benchmark_log)
             # TODO move into log somehow
             pprint(results)
 
             self.results.report_benchmark_results(framework_test, test_type,
                                                   results['results'])
-            log("Complete", benchmark_log)
+            log("Complete", file=benchmark_log)
 
         for test_type in framework_test.runTests:
             benchmark_type(test_type)
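
For reference, a minimal usage sketch of the call patterns this file now relies on; the log path, test name, and port below are illustrative stand-ins, not values taken from the toolset:

from colorama import Fore
from toolset.utils.output_helper import log

# Section headers that previously went through header(top='=', bottom='=')
log("Running Tests...", border='=')

# Per-test and error output now pass explicit keyword arguments
with open('results/benchmark.log', 'w') as benchmark_log:  # illustrative path
    log("Running Test: example", border='-')
    log("Error: Port 8080 is not available, cannot start example",
        prefix="example: ", file=benchmark_log, color=Fore.RED)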

+ 1 - 1
toolset/benchmark/fortune_html_parser.py

@@ -186,5 +186,5 @@ class FortuneHTMLParser(HTMLParser):
                 headers_left -= 1
                 if headers_left <= 0:
                     output += os.linesep
-            log(output, "%s: " % name)
+            log(output, prefix="%s: " % name)
         return (same, diff_lines)

+ 9 - 10
toolset/benchmark/framework_test.py

@@ -3,7 +3,7 @@ import subprocess
 import traceback
 from requests import ConnectionError
 
-from toolset.utils.output_helper import header, log, FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils import docker_helper
 
 # Cross-platform colored text
@@ -129,9 +129,8 @@ class FrameworkTest:
             with open(os.path.join(verificationPath, 'verification.txt'),
                       'w') as verification:
                 test = self.runTests[test_type]
-                header(
-                    message="VERIFYING %s" % test_type.upper(),
-                    log_file=verification)
+                log("VERIFYING %s" % test_type.upper(),
+                    file=verification, border='-', color=Fore.WHITE + Style.BRIGHT)
 
                 base_url = "http://%s:%s" % (
                     self.benchmarker_config.server_host, self.port)
@@ -166,7 +165,7 @@ class FrameworkTest:
                     results = [('fail', "Server did not respond to request",
                                 base_url)]
                     log("Verifying test %s for %s caused an exception: %s" %
-                        (test_type, self.name, e))
+                        (test_type, self.name, e), color=Fore.RED)
                 except Exception as e:
                     results = [('fail', """Caused Exception in TFB
             This almost certainly means your return value is incorrect,
@@ -174,7 +173,7 @@ class FrameworkTest:
             including this message: %s\n%s""" % (e, traceback.format_exc()),
                                 base_url)]
                     log("Verifying test %s for %s caused an exception: %s" %
-                        (test_type, self.name, e))
+                        (test_type, self.name, e), color=Fore.RED)
                     traceback.format_exc()
 
                 test.failed = any(
@@ -193,14 +192,14 @@ class FrameworkTest:
                         color = Fore.RED
 
                     log("   {!s}{!s}{!s} for {!s}".format(
-                        color, result.upper(), Style.RESET_ALL, url), None,
-                        verification)
+                        color, result.upper(), Style.RESET_ALL, url),
+                        file=verification)
                     if reason is not None and len(reason) != 0:
                         for line in reason.splitlines():
-                            log("     " + line, None, verification)
+                            log("     " + line, file=verification)
                         if not test.passed:
                             log("     See {!s}".format(specific_rules_url),
-                                None, verification)
+                                file=verification)
 
                 [output_result(r1, r2, url) for (r1, r2, url) in results]
 

+ 9 - 12
toolset/benchmark/test_types/framework_test_type.py

@@ -7,7 +7,8 @@ import psycopg2
 import pymongo
 import traceback
 
-from toolset.utils.output_helper import log, log_error
+from colorama import Fore
+from toolset.utils.output_helper import log
 
 
 class FrameworkTestType:
@@ -85,7 +86,7 @@ class FrameworkTestType:
         Downloads a URL and returns the HTTP response headers
         and body content as a tuple
         '''
-        log("Accessing URL {!s}:".format(url))
+        log("Accessing URL {!s}: ".format(url), color=Fore.CYAN)
 
         headers = {'Accept': self.accept_header}
         r = requests.get(url, timeout=15, headers=headers)
@@ -94,10 +95,6 @@ class FrameworkTestType:
         body = r.content
         log(str(headers))
         log(body)
-        b = 40
-        log("  Response (trimmed to {:d} bytes): \"{!s}\"".format(
-            b,
-            body.strip()[:b]))
         return headers, body
 
     def verify(self, base_url):
@@ -167,8 +164,8 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                log("ERROR: Unable to load current MySQL World table.")
-                log_error(tb)
+                log("ERROR: Unable to load current MySQL World table.", color=Fore.RED)
+                log(tb)
         elif database_name == "postgres":
             try:
                 db = psycopg2.connect(
@@ -188,8 +185,8 @@ class FrameworkTestType:
                 db.close()
             except Exception:
                 tb = traceback.format_exc()
-                log("ERROR: Unable to load current Postgres World table.")
-                log_error(tb)
+                log("ERROR: Unable to load current Postgres World table.", color=Fore.RED)
+                log(tb)
         elif database_name == "mongodb":
             try:
                 worlds_json = {}
@@ -208,8 +205,8 @@ class FrameworkTestType:
                 connection.close()
             except Exception:
                 tb = traceback.format_exc()
-                log("ERROR: Unable to load current MongoDB World table.")
-                log_error(tb)
+                log("ERROR: Unable to load current MongoDB World table.", color=Fore.RED)
+                log(tb)
         else:
             raise ValueError(
                 "Database: {!s} does not exist".format(database_name))

+ 2 - 2
toolset/benchmark/test_types/verifications.py

@@ -2,7 +2,7 @@ import json
 import re
 import traceback
 
-from toolset.utils.output_helper import log_error
+from toolset.utils.output_helper import log
 
 
 def basic_body_verification(body, url, is_json_check=True):
@@ -280,7 +280,7 @@ def verify_updates(old_worlds, new_worlds, updates_expected, url):
                         successful_updates += 1
             except Exception:
                 tb = traceback.format_exc()
-                log_error(tb)
+                log(tb)
         n += 1
 
     if successful_updates == 0:

+ 5 - 5
toolset/run-tests.py

@@ -41,8 +41,8 @@ class StoreSeqAction(argparse.Action):
             try:
                 (start, step, end) = sequence.split(':')
             except ValueError:
-                log("  Invalid: {!s}".format(sequence))
-                log("  Requires start:step:end, e.g. 1:2:10")
+                log("  Invalid: {!s}".format(sequence), color=Fore.RED)
+                log("  Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                 raise
             result.remove(sequence)
             result = result + range(int(start), int(end), int(step))
@@ -103,7 +103,7 @@ def main(argv=None):
                     os.environ['FWROOT'],
                     args.conf_file)) and not os.path.exists(
                         os.path.join(os.environ['FWROOT'] + 'benchmark.cfg')):
-            log("No config file found. Aborting!")
+            log("No config file found. Aborting!", Fore.RED)
             exit(1)
         with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
             config = ConfigParser.SafeConfigParser()
@@ -116,7 +116,7 @@ def main(argv=None):
                 except Exception:
                     pass
     except IOError:
-        log("Configuration file not found!")
+        log("Configuration file not found!", Fore.RED)
         exit(1)
 
     ##########################################################
@@ -125,7 +125,7 @@ def main(argv=None):
 
     # Verify and massage options
     if defaults['client_user'] is None or defaults['client_host'] is None:
-        log("client_user and client_host are required!")
+        log("client_user and client_host are required!", color=Fore.RED)
         log("Please check your configuration file.")
         log("Aborting!")
         exit(1)

+ 24 - 14
toolset/utils/docker_helper.py

@@ -6,10 +6,12 @@ import multiprocessing
 import json
 import docker
 import time
+import re
 import traceback
 from threading import Thread
+from colorama import Fore, Style
 
-from toolset.utils.output_helper import log, log_error, FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils.metadata_helper import gather_tests
 from toolset.utils.ordered_set import OrderedSet
 from toolset.utils.database_helper import test_database
@@ -94,8 +96,8 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
                                            dependency + ".dockerfile")
                     if not docker_file:
                         log("Docker build failed; %s could not be found; terminating"
-                            % (dependency + ".dockerfile"), log_prefix,
-                            build_log)
+                            % (dependency + ".dockerfile"),
+                            prefix=log_prefix, file=build_log, color=Fore.RED)
                         return 1
 
                     # Build the dependency image
@@ -110,12 +112,16 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
                             if line.startswith('{"stream":'):
                                 line = json.loads(line)
                                 line = line[line.keys()[0]].encode('utf-8')
-                                log(line, log_prefix, build_log)
+                                log(line,
+                                    prefix=log_prefix,
+                                    file=build_log,
+                                    color=Fore.WHITE + Style.BRIGHT \
+                                        if re.match(r'^Step \d+\/\d+', line) else '')
                     except Exception:
                         tb = traceback.format_exc()
                         log("Docker dependency build failed; terminating",
-                            log_prefix, build_log)
-                        log_error(tb, log_prefix, build_log)
+                            prefix=log_prefix, file=build_log, color=Fore.RED)
+                        log(tb, prefix=log_prefix, file=build_log)
                         return 1
 
         # Build the test images
@@ -138,12 +144,16 @@ def build(benchmarker_config, test_names, build_log_dir=os.devnull):
                         if line.startswith('{"stream":'):
                             line = json.loads(line)
                             line = line[line.keys()[0]].encode('utf-8')
-                            log(line, log_prefix, build_log)
+                            log(line,
+                                prefix=log_prefix,
+                                file=build_log,
+                                color=Fore.WHITE + Style.BRIGHT \
+                                    if re.match(r'^Step \d+\/\d+', line) else '')
                 except Exception:
                     tb = traceback.format_exc()
-                    log("Docker build failed; terminating", log_prefix,
-                        build_log)
-                    log_error(tb, log_prefix, build_log)
+                    log("Docker build failed; terminating",
+                        prefix=log_prefix, file=build_log, color=Fore.RED)
+                    log(tb, prefix=log_prefix, file=build_log)
                     return 1
 
     return 0
@@ -165,7 +175,7 @@ def run(benchmarker_config, docker_files, run_log_dir):
                             run_log_dir, "%s.log" % docker_file.replace(
                                 ".dockerfile", "").lower()), 'w') as run_log:
                     for line in container.logs(stream=True):
-                        log(line, log_prefix, run_log)
+                        log(line, prefix=log_prefix, file=run_log)
 
             extra_hosts = {
                 socket.gethostname(): str(benchmarker_config.server_host),
@@ -197,8 +207,8 @@ def run(benchmarker_config, docker_files, run_log_dir):
                         ".dockerfile", "").lower()), 'w') as run_log:
                 tb = traceback.format_exc()
                 log("Running docker cointainer: %s failed" % docker_file,
-                    log_prefix, run_log)
-                log_error(tb, log_prefix, run_log)
+                    prefix=log_prefix, file=run_log)
+                log(tb, prefix=log_prefix, file=run_log)
                 return 1
 
     return 0
@@ -225,7 +235,7 @@ def successfully_running_containers(docker_files, out):
         if image_name not in running_container_images:
             log_prefix = "%s: " % image_name
             log("ERROR: Expected tfb/test/%s to be running container" %
-                image_name, log_prefix, out)
+                image_name, prefix=log_prefix, file=out)
             return False
     return True
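
The two build loops above share the same parse-and-highlight step; here is a condensed sketch of that logic as a free-standing helper, for illustration only (the helper name is hypothetical, and the code assumes Python 2 just as the diff does):

import json
import re
from colorama import Fore, Style

def format_build_line(raw_line):
    # docker-py emits JSON progress records; only {"stream": "..."} lines are logged
    if not raw_line.startswith('{"stream":'):
        return None, ''
    payload = json.loads(raw_line)
    text = payload[payload.keys()[0]].encode('utf-8')  # Python 2: keys() returns a list
    # Brighten "Step N/M ..." lines so each build stage stands out in the log
    color = Fore.WHITE + Style.BRIGHT if re.match(r'^Step \d+\/\d+', text) else ''
    return text, color

# Inside the build loop this would be used roughly as:
#   text, color = format_build_line(line)
#   if text is not None:
#       log(text, prefix=log_prefix, file=build_log, color=color)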
 

+ 30 - 58
toolset/utils/output_helper.py

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 import os, sys, re
+
+from colorama import Fore, Style
 from contextlib import contextmanager
 
 # RegExp for stripping color codes
@@ -8,80 +10,50 @@ seq = re.compile(r'\x1B\[\d+m')
 FNULL = open(os.devnull, 'w')
 
 
-def header(message,
-           top='-',
-           bottom='-',
-           log_file=None,
-           quiet=False,
-           color=None):
-    '''
-    Generates a clean header with the given message and top/bottom text breaks.
-    Optionally, a log file can be provided to log this header to, as well.
-    '''
-    topheader = (top * 80)[:80]
-    bottomheader = (bottom * 80)[:80]
-    result = ""
-    if topheader != "":
-        if color is not None:
-            result += color
-        result += "%s" % topheader
-    if message != "":
-        if color is not None:
-            result += color
-
-        if result == "":
-            result = "  %s" % message
-        else:
-            result += "%s  %s" % (os.linesep, message)
-    if bottomheader != "":
-        if color is not None:
-            result += color
-        result += "%s%s" % (os.linesep, bottomheader)
-    log(result + os.linesep, None, log_file, True, quiet)
-
-
-def log(log_text=None,
-        prefix=None,
-        log_file=None,
-        allow_newlines=False,
-        quiet=False):
+def log(log_text=None, **kwargs):
     '''
     Logs the given text and optional prefix to stdout (if quiet is False) and
     to an optional log file. By default, we strip out newlines in order to 
     print our lines correctly, but you can override this functionality if you
     want to print multi-line output.
     '''
-    if not log_text:
-        return
-
-    if not allow_newlines:
-        log_text = log_text.splitlines()[0] + os.linesep
 
-        if log_text.splitlines()[0].strip() is '':
-            return
+    # set up some defaults
+    color = kwargs.get('color', '')
+    color_reset = Style.RESET_ALL if color else ''
+    prefix = kwargs.get('prefix', '')
+    border = kwargs.get('border')
+    border_bottom = kwargs.get('border_bottom')
+    file = kwargs.get('file')
+    quiet = kwargs.get('quiet')
+
+    if border is not None:
+        border = color + (border * 80) + os.linesep + color_reset
+        border_bottom = border if border_bottom is None else \
+            color + (border_bottom * 80) + os.linesep + color_reset
+    elif not log_text:
+        return
 
     try:
         if not quiet:
-            if prefix is not None:
-                sys.stdout.write(prefix + log_text)
-            else:
-                sys.stdout.write(log_text)
+            new_log_text = border or ''
+            for line in log_text.splitlines():
+                if line.strip() is not '':
+                    if prefix:
+                        new_log_text += Style.DIM + prefix + Style.RESET_ALL
+                    new_log_text += color + line + color_reset + os.linesep
+            new_log_text += border_bottom or ''
+
+            sys.stdout.write(new_log_text)
             sys.stdout.flush()
 
-        if log_file is not None:
-            log_file.write(seq.sub('', log_text))
-            log_file.flush()
+        if file is not None:
+            file.write(seq.sub('', log_text))
+            file.flush()
     except:
         pass
 
 
-def log_error(exception=None, prefix=None, log_file=None, quiet=False):
-    '''
-    Logs the given exception
-    '''
-    log(exception + os.linesep, prefix, log_file, True, quiet)
-
-
 class QuietOutputStream:
     '''
     Provides an output stream which either writes to stdout or nothing 
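
To show the consolidated interface in one place, a short usage sketch of the rewritten log(); the message text and prefix are illustrative, while the keyword names match the kwargs read above:

from colorama import Fore
from toolset.utils.output_helper import log

# Bordered output replaces the removed header() helper
log("Verification Summary", border='=', border_bottom='-', color=Fore.CYAN)

# Multi-line text is now written line by line, with the (dimmed) prefix
# repeated on each line, rather than truncated to its first line; this is
# what lets full docker build output reach the console.
log("Step 1/7 : FROM ubuntu:16.04\nStep 2/7 : RUN apt-get update",
    prefix="example: ", color=Fore.WHITE)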

+ 4 - 5
toolset/utils/results_helper.py

@@ -1,8 +1,7 @@
 from toolset.utils.metadata_helper import gather_remaining_tests, gather_frameworks
-from toolset.utils.output_helper import header, log, FNULL
+from toolset.utils.output_helper import log, FNULL
 
 import os
-import logging
 import subprocess
 import uuid
 import time
@@ -294,8 +293,8 @@ class Results:
             # Normally you don't have to use Fore.BLUE before each line, but
             # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
             # or stream flush, so we have to ensure that the color code is printed repeatedly
-            header(
-                "Verification Summary", top='=', bottom='-', color=Fore.CYAN)
+            log(
+                "Verification Summary", border='=', border_bottom='-', color=Fore.CYAN)
             for test in tests:
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
@@ -312,7 +311,7 @@ class Results:
                 else:
                     log(Fore.CYAN + "|      " + Fore.RED +
                         "NO RESULTS (Did framework launch?)")
-            header('', top='=', bottom='', color=Fore.CYAN)
+            log('', border='=', border_bottom='', color=Fore.CYAN)
 
         log("%sTime to complete: %s seconds" %
             (Style.RESET_ALL, str(int(time.time() - self.config.start_time))))