
More time logging (#3651)

Mike Smith 7 years ago
parent
commit
2526bce0c9

+ 29 - 20
toolset/benchmark/benchmarker.py

@@ -22,7 +22,7 @@ class Benchmarker:
         Initialize the benchmarker.
         '''
         self.config = config
-        self.timeLogger = TimeLogger()
+        self.time_logger = TimeLogger()
         self.metadata = Metadata(self)
         self.audit = Audit(self)
 
@@ -32,8 +32,6 @@ class Benchmarker:
         self.results = Results(self)
         self.docker_helper = DockerHelper(self)
 
-
-
     ##########################################################################################
     # Public methods
     ##########################################################################################
@@ -92,7 +90,7 @@ class Benchmarker:
                 prefix=prefix,
                 file=file,
                 color=Fore.RED if success else '')
-        self.timeLogger.log_test_end(log_prefix=prefix, file=file)
+        self.time_logger.log_test_end(log_prefix=prefix, file=file)
         return success
 
     def __run_test(self, test, benchmark_log):
@@ -103,11 +101,13 @@ class Benchmarker:
         '''
 
         log_prefix = "%s: " % test.name
-        self.timeLogger.log_test_start()
+        # Start timing the total test duration
+        self.time_logger.mark_test_start()
 
         # If the test is in the excludes list, we skip it
         if self.config.exclude and test.name in self.config.exclude:
-            message = "Test {name} has been added to the excludes list. Skipping.".format(name=test.name)
+            message = "Test {name} has been added to the excludes list. Skipping.".format(
+                name=test.name)
             self.results.write_intermediate(test.name, message)
             return self.__exit_test(
                 success=False,
@@ -119,6 +119,7 @@ class Benchmarker:
         try:
             # Start database container
             if test.database.lower() != "none":
+                self.time_logger.mark_starting_database()
                 database_container = self.docker_helper.start_database(
                     test.database.lower())
                 if database_container is None:
@@ -128,12 +129,15 @@ class Benchmarker:
                         message=message,
                         prefix=log_prefix,
                         file=benchmark_log)
+                self.time_logger.mark_started_database()
 
             # Start webapp
             container = test.start()
+            self.time_logger.mark_test_starting()
             if container is None:
                 self.docker_helper.stop([container, database_container])
-                message = "ERROR: Problem starting {name}".format(name=test.name)
+                message = "ERROR: Problem starting {name}".format(
+                    name=test.name)
                 self.results.write_intermediate(test.name, message)
                 return self.__exit_test(
                     success=False,
@@ -145,7 +149,8 @@ class Benchmarker:
             max_sleep = 60
             accepting_requests = False
             while not accepting_requests and slept < max_sleep:
-                if not self.docker_helper.server_container_exists(container.id):
+                if not self.docker_helper.server_container_exists(
+                        container.id):
                     break
                 accepting_requests = test.is_accepting_requests()
                 time.sleep(1)
@@ -161,6 +166,8 @@ class Benchmarker:
                     prefix=log_prefix,
                     file=benchmark_log)
 
+            self.time_logger.mark_test_accepting_requests()
+
             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
                 log("Entering debug mode. Server has started. CTRL-c to stop.",
@@ -172,23 +179,26 @@ class Benchmarker:
 
             # Verify URLs and audit
             log("Verifying framework URLs", prefix=log_prefix)
-            self.timeLogger.log_verify_start()
+            self.time_logger.mark_verify_start()
             passed_verify = test.verify_urls()
             self.audit.audit_test_dir(test.directory)
-            self.timeLogger.log_verify_end(
-                log_prefix=log_prefix,
-                file=benchmark_log)
 
             # Benchmark this test
             if self.config.mode == "benchmark":
                 log("Benchmarking %s" % test.name,
                     file=benchmark_log,
                     border='-')
-                self.timeLogger.log_benchmarking_start()
+                self.time_logger.mark_benchmarking_start()
                 self.__benchmark(test, benchmark_log)
-                self.timeLogger.log_benchmarking_end(
-                    log_prefix=log_prefix,
-                    file=benchmark_log)
+                self.time_logger.log_benchmarking_end(
+                    log_prefix=log_prefix, file=benchmark_log)
+
+            # Log test timing stats
+            self.time_logger.log_build_flush(benchmark_log)
+            self.time_logger.log_database_start_time(log_prefix, benchmark_log)
+            self.time_logger.log_test_accepting_requests(
+                log_prefix, benchmark_log)
+            self.time_logger.log_verify_end(log_prefix, benchmark_log)
 
             # Stop this test
             self.docker_helper.stop([container, database_container])
@@ -220,9 +230,7 @@ class Benchmarker:
                 file=benchmark_log)
 
         return self.__exit_test(
-            success=True,
-            prefix=log_prefix,
-            file=benchmark_log)
+            success=True, prefix=log_prefix, file=benchmark_log)
 
     def __benchmark(self, framework_test, benchmark_log):
         '''
@@ -251,7 +259,8 @@ class Benchmarker:
                                                        framework_test.port,
                                                        test.get_url()))
 
-                self.docker_helper.benchmark(script, script_variables, raw_file)
+                self.docker_helper.benchmark(script, script_variables,
+                                             raw_file)
 
                 # End resource usage metrics collection
                 self.__end_logging()
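
The benchmarker.py hunks above rename timeLogger to time_logger, rename the *_start logging calls to mark_*, and add two new timings (database startup and time until the webapp accepts requests) that are reported in a summary block before the test is stopped. As a reading aid, here is a minimal, hedged sketch of the call order those hunks establish for one test run; the stub class and run_one_test are invented for illustration and are not part of the commit.

    import time

    class StubTimeLogger:
        # Invented stand-in mirroring the mark_*/log_* split used above.
        def __init__(self):
            self.marks = {}

        def mark(self, name):
            self.marks[name] = time.time()

        def seconds_since(self, name):
            return int(time.time() - self.marks[name])

    def run_one_test(timer):
        timer.mark("test")           # mark_test_start()
        timer.mark("database")       # mark_starting_database()
        # ... docker_helper.start_database(...) ...
        db_secs = timer.seconds_since("database")    # mark_started_database()
        timer.mark("webapp")         # mark_test_starting(), right after test.start()
        # ... poll until the container accepts requests ...
        accept_secs = timer.seconds_since("webapp")  # mark_test_accepting_requests()
        timer.mark("verify")         # mark_verify_start()
        # ... test.verify_urls() ...
        timer.mark("benchmark")      # mark_benchmarking_start()
        # ... __benchmark(...), then log_benchmarking_end(...) ...
        # Then the new summary block runs: log_build_flush,
        # log_database_start_time, log_test_accepting_requests, log_verify_end.
        return db_secs, accept_secs

    run_one_test(StubTimeLogger())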

+ 1 - 2
toolset/run-tests.py

@@ -228,8 +228,7 @@ def main(argv=None):
                 return any_failed
     except Exception:
         tb = traceback.format_exc()
-        log("A fatal error has occurred",
-            color=Fore.RED)
+        log("A fatal error has occurred", color=Fore.RED)
         log(tb)
         # try one last time to stop docker containers on fatal error
         try:

+ 2 - 2
toolset/utils/audit.py

@@ -1,4 +1,3 @@
-import os
 from toolset.utils.output_helper import log
 from colorama import Fore
 
@@ -13,7 +12,8 @@ class Audit:
 
     def start_audit(self):
         for lang in self.benchmarker.metadata.gather_languages():
-            for test_dir in self.benchmarker.metadata.gather_language_tests(lang):
+            for test_dir in self.benchmarker.metadata.gather_language_tests(
+                    lang):
                 self.audit_test_dir(test_dir)
 
     def audit_test_dir(self, test_dir):

+ 27 - 20
toolset/utils/docker_helper.py

@@ -13,8 +13,7 @@ from toolset.utils.database_helper import test_database
 
 
 class DockerHelper:
-
-    def __init__(self, benchmarker = None):
+    def __init__(self, benchmarker=None):
         self.benchmarker = benchmarker
 
         self.client = docker.DockerClient(
@@ -24,12 +23,13 @@ class DockerHelper:
         self.database = docker.DockerClient(
             base_url=self.benchmarker.config.database_docker_host)
 
-    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile, tag):
+    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile,
+                tag):
         '''
         Builds docker containers using docker-py low-level api
         '''
 
-        self.benchmarker.timeLogger.log_build_start()
+        self.benchmarker.time_logger.mark_build_start()
         with open(build_log_file, 'w') as build_log:
             try:
                 client = docker.APIClient(base_url=base_url)
@@ -71,14 +71,12 @@ class DockerHelper:
                     file=build_log,
                     color=Fore.RED)
                 log(tb, prefix=log_prefix, file=build_log)
-                self.benchmarker.timeLogger.log_build_end(
-                    log_prefix=log_prefix,
-                    file=build_log)
+                self.benchmarker.time_logger.log_build_end(
+                    log_prefix=log_prefix, file=build_log)
                 raise
 
-            self.benchmarker.timeLogger.log_build_end(
-                log_prefix=log_prefix,
-                file=build_log)
+            self.benchmarker.time_logger.log_build_end(
+                log_prefix=log_prefix, file=build_log)
 
     def clean(self):
         '''
@@ -143,8 +141,9 @@ class DockerHelper:
 
             def watch_container(docker_container, docker_file):
                 with open(
-                        os.path.join(run_log_dir, "%s.log" % docker_file.replace(
-                            ".dockerfile", "").lower()), 'w') as run_log:
+                        os.path.join(
+                            run_log_dir, "%s.log" % docker_file.replace(
+                                ".dockerfile", "").lower()), 'w') as run_log:
                     for line in docker_container.logs(stream=True):
                         log(line, prefix=log_prefix, file=run_log)
 
@@ -153,9 +152,12 @@ class DockerHelper:
 
             if self.benchmarker.config.network is None:
                 extra_hosts = {
-                    socket.gethostname(): str(self.benchmarker.config.server_host),
-                    'tfb-server': str(self.benchmarker.config.server_host),
-                    'tfb-database': str(self.benchmarker.config.database_host)
+                    socket.gethostname():
+                    str(self.benchmarker.config.server_host),
+                    'tfb-server':
+                    str(self.benchmarker.config.server_host),
+                    'tfb-database':
+                    str(self.benchmarker.config.database_host)
                 }
                 name = None
 
@@ -200,7 +202,8 @@ class DockerHelper:
                     os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                     'w') as run_log:
                 tb = traceback.format_exc()
-                log("Running docker cointainer: %s.dockerfile failed" % test.name,
+                log("Running docker cointainer: %s.dockerfile failed" %
+                    test.name,
                     prefix=log_prefix,
                     file=run_log)
                 log(tb, prefix=log_prefix, file=run_log)
@@ -212,8 +215,8 @@ class DockerHelper:
         try:
             client = container.client
             container.kill()
-            while container.id in map(
-                    lambda x: x.id, client.containers.list()):
+            while container.id in map(lambda x: x.id,
+                                      client.containers.list()):
                 pass
         except:
             # container has already been killed
@@ -262,7 +265,8 @@ class DockerHelper:
                 image_name = "techempower/%s:latest" % db
                 log_prefix = image_name + ": "
 
-                database_dir = os.path.join(self.benchmarker.config.db_root, db)
+                database_dir = os.path.join(self.benchmarker.config.db_root,
+                                            db)
                 docker_file = "%s.dockerfile" % db
 
                 self.__build(
@@ -282,7 +286,10 @@ class DockerHelper:
         image_name = "techempower/%s:latest" % database
         log_prefix = image_name + ": "
 
-        sysctl = {'net.core.somaxconn': 65535, 'kernel.sem': "250 32000 256 512"}
+        sysctl = {
+            'net.core.somaxconn': 65535,
+            'kernel.sem': "250 32000 256 512"
+        }
 
         ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
 

+ 12 - 15
toolset/utils/results.py

@@ -49,9 +49,7 @@ class Results:
         self.pipelineConcurrencyLevels = self.config.pipeline_concurrency_levels
         self.queryIntervals = self.config.query_levels
         self.cachedQueryIntervals = self.config.cached_query_levels
-        self.frameworks = [
-            t.name for t in benchmarker.tests
-        ]
+        self.frameworks = [t.name for t in benchmarker.tests]
         self.duration = self.config.duration
         self.rawData = dict()
         self.rawData['json'] = dict()
@@ -109,8 +107,7 @@ class Results:
         results['results'] = []
         stats = []
 
-        if os.path.exists(
-                self.get_raw_file(framework_test.name, test_type)):
+        if os.path.exists(self.get_raw_file(framework_test.name, test_type)):
             with open(self.get_raw_file(framework_test.name,
                                         test_type)) as raw_data:
 
@@ -129,8 +126,8 @@ class Results:
                             rawData = dict()
                             results['results'].append(rawData)
                         if "Latency" in line:
-                            m = re.findall(
-                                r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
+                            m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)",
+                                           line)
                             if len(m) == 4:
                                 rawData['latencyAvg'] = m[0]
                                 rawData['latencyStdev'] = m[1]
@@ -153,8 +150,8 @@ class Results:
                                 m = re.search("timeout ([0-9]+)", line)
                                 rawData['timeout'] = int(m.group(1))
                         if "Non-2xx" in line:
-                            m = re.search(
-                                "Non-2xx or 3xx responses: ([0-9]+)", line)
+                            m = re.search("Non-2xx or 3xx responses: ([0-9]+)",
+                                          line)
                             if m != None:
                                 rawData['5xx'] = int(m.group(1))
                         if "STARTTIME" in line:
@@ -165,12 +162,11 @@ class Results:
                             rawData["endTime"] = int(m.group(0))
                             test_stats = self.__parse_stats(
                                 framework_test, test_type,
-                                rawData["startTime"], rawData["endTime"],
-                                1)
+                                rawData["startTime"], rawData["endTime"], 1)
                             stats.append(test_stats)
         with open(
-                self.get_stats_file(framework_test.name, test_type) +
-                ".json", "w") as stats_file:
+                self.get_stats_file(framework_test.name, test_type) + ".json",
+                "w") as stats_file:
             json.dump(stats, stats_file, indent=2)
 
         return results
@@ -296,7 +292,7 @@ class Results:
                 log(Fore.CYAN + "| {!s}".format(test.name))
                 if test.name in self.verify.keys():
                     for test_type, result in self.verify[
-                        test.name].iteritems():
+                            test.name].iteritems():
                         if result.upper() == "PASS":
                             color = Fore.GREEN
                         elif result.upper() == "WARN":
@@ -353,7 +349,8 @@ class Results:
         '''
         Counts the significant lines of code for all tests and stores in results.
         '''
-        frameworks = self.benchmarker.metadata.gather_frameworks(self.config.test, self.config.exclude)
+        frameworks = self.benchmarker.metadata.gather_frameworks(
+            self.config.test, self.config.exclude)
 
         jsonResult = {}
         for framework, testlist in frameworks.items():
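
The results.py changes above are pure reflow, but the wrapped regular expressions read more easily against a concrete input. Here is a small hedged example of the Latency pattern from the hunk above, run against an invented wrk summary line (the sample text is an assumption, not output captured by this commit):

    import re

    # Invented wrk output line for illustration; the pattern is the one shown
    # in the diff and captures avg, stdev, max and +/- stdev, in that order.
    line = "    Latency     1.23ms  450.12us    9.87ms   95.01%"
    fields = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
    print(fields)  # ['1.23ms', '450.12us', '9.87ms', '95.01%']
    # len(fields) == 4, which is the condition the parser checks before
    # filling rawData['latencyAvg'], rawData['latencyStdev'], and so on.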

+ 3 - 1
toolset/utils/scaffolding.py

@@ -3,6 +3,7 @@ import os, re
 from shutil import copytree
 from toolset.utils.metadata import Metadata
 
+
 class Scaffolding:
     def __init__(self, benchmarker):
         print("""
@@ -208,7 +209,8 @@ class Scaffolding:
             prompt += "{!s}/".format(i)
             options.append(db[0])
             i += 1
-        print("  {!s}) None: No database at this time{!s}".format(i, os.linesep))
+        print("  {!s}) None: No database at this time{!s}".format(
+            i, os.linesep))
         prompt += "{!s}]: ".format(i)
         options.append("None")
         valid = self.__prompt_database(prompt, options)

+ 48 - 21
toolset/utils/time_logger.py

@@ -1,8 +1,9 @@
 import time
-from colorama import Fore, Style
+from colorama import Fore
 
 from toolset.utils.output_helper import log
 
+
 class TimeLogger:
     '''
     Class for keeping track of and logging execution times
@@ -14,8 +15,12 @@ class TimeLogger:
 
         self.benchmarking_start = 0
         self.benchmarking_total = 0
+        self.database_starting = 0
+        self.database_started = 0
         self.build_start = 0
         self.build_total = 0
+        self.test_started = 0
+        self.accepting_requests = 0
         self.test_start = 0
         self.test_total = 0
         self.verify_start = 0
@@ -35,29 +40,39 @@ class TimeLogger:
         output = output + "%ss" % s
         return output
 
-    def log_benchmarking_start(self):
+    def mark_starting_database(self):
+        self.database_starting = time.time()
+
+    def mark_started_database(self):
+        self.database_started = int(time.time() - self.database_starting)
+
+    def log_database_start_time(self, log_prefix, file):
+        log("Time starting database: %s" % TimeLogger.output(
+            self.database_started),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
+
+    def mark_benchmarking_start(self):
         self.benchmarking_start = time.time()
 
     def log_benchmarking_end(self, log_prefix, file):
         total = int(time.time() - self.benchmarking_start)
         self.benchmarking_total = self.benchmarking_total + total
-        log("Total benchmarking time: %s" % TimeLogger.output(total),
+        log("Benchmarking time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
 
-    def log_build_start(self):
+    def mark_build_start(self):
         self.build_start = time.time()
 
     def log_build_end(self, log_prefix, file):
         total = int(time.time() - self.build_start)
         self.build_total = self.build_total + total
-        log_str = "Total build time: %s" % TimeLogger.output(total)
+        log_str = "Build time: %s" % TimeLogger.output(total)
         self.build_logs.append({'log_prefix': log_prefix, 'str': log_str})
-        log(log_str,
-            prefix=log_prefix,
-            file=file,
-            color=Fore.YELLOW)
+        log(log_str, prefix=log_prefix, file=file, color=Fore.YELLOW)
 
     def log_build_flush(self, file):
         for b_log in self.build_logs:
@@ -67,7 +82,20 @@ class TimeLogger:
                 color=Fore.YELLOW)
         self.build_logs = []
 
-    def log_test_start(self):
+    def mark_test_starting(self):
+        self.test_started = time.time()
+
+    def mark_test_accepting_requests(self):
+        self.accepting_requests = int(time.time() - self.test_started)
+
+    def log_test_accepting_requests(self, log_prefix, file):
+        log("Time until accepting requests: %s" % TimeLogger.output(
+            self.accepting_requests),
+            prefix=log_prefix,
+            file=file,
+            color=Fore.YELLOW)
+
+    def mark_test_start(self):
         self.test_start = time.time()
 
     def log_test_end(self, log_prefix, file):
@@ -76,37 +104,36 @@ class TimeLogger:
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
-        log("Total time building so far: %s"
-            % TimeLogger.output(self.build_total),
+        log("Total time building so far: %s" % TimeLogger.output(
+            self.build_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
-        log("Total time verifying so far: %s"
-            % TimeLogger.output(self.verify_total),
+        log("Total time verifying so far: %s" % TimeLogger.output(
+            self.verify_total),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
         if self.benchmarking_total > 0:
-            log("Total time benchmarking so far: %s"
-                % TimeLogger.output(self.benchmarking_total),
+            log("Total time benchmarking so far: %s" % TimeLogger.output(
+                self.benchmarking_total),
                 prefix="tfb: ",
                 file=file,
                 color=Fore.YELLOW)
         running_time = int(time.time() - self.start)
-        log("Total execution time so far: %s"
-            % TimeLogger.output(running_time),
+        log("Total execution time so far: %s" %
+            TimeLogger.output(running_time),
             prefix="tfb: ",
             file=file,
             color=Fore.YELLOW)
 
-    def log_verify_start(self):
+    def mark_verify_start(self):
         self.verify_start = time.time()
 
     def log_verify_end(self, log_prefix, file):
-        self.log_build_flush(file)
         total = int(time.time() - self.verify_start)
         self.verify_total = self.verify_total + total
-        log("Total verify time: %s" % TimeLogger.output(total),
+        log("Verify time: %s" % TimeLogger.output(total),
             prefix=log_prefix,
             file=file,
             color=Fore.YELLOW)
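
The time_logger.py hunk splits each timer into a mark_* method that starts the clock and a log_* method that reports it, and shortens the per-test messages ("Build time", "Benchmarking time", "Verify time") since log_test_end already prints the running totals. A brief, hedged usage sketch of the renamed build-timing API follows (it assumes the toolset package is importable, e.g. when run from the repository root; the log file name and prefix are made up):

    from toolset.utils.time_logger import TimeLogger

    time_logger = TimeLogger()

    time_logger.mark_build_start()
    # ... build a docker image here ...
    with open("build-example.log", "w") as build_log:
        # Prints "Build time: Ns" and queues the entry so log_build_flush can
        # replay it later in the per-test summary.
        time_logger.log_build_end(log_prefix="example: ", file=build_log)

        # In the benchmarker this flush now happens in the summary block at
        # the end of __run_test; calling it here just replays the queued entry.
        time_logger.log_build_flush(build_log)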

+ 1 - 0
toolset/wrk/wrk.dockerfile

@@ -5,6 +5,7 @@ RUN curl -sL https://github.com/wg/wrk/archive/4.1.0.tar.gz | tar xz --strip-com
 RUN make > /dev/null
 RUN cp wrk /usr/local/bin
 
+WORKDIR /
 # Required scripts for benchmarking
 COPY pipeline.lua pipeline.lua
 COPY concurrency.sh concurrency.sh