@@ -1,4 +1,4 @@
-from toolset.utils.output_helper import FNULL
+from toolset.utils.output_helper import log, FNULL
 from toolset.utils.docker_helper import DockerHelper
 from toolset.utils.time_logger import TimeLogger
 from toolset.utils.metadata import Metadata
@@ -23,8 +23,7 @@ class Benchmarker:
         Initialize the benchmarker.
         '''
         self.config = config
-        self.log = config.log
-        self.time_logger = TimeLogger(config)
+        self.time_logger = TimeLogger()
         self.metadata = Metadata(self)
         self.audit = Audit(self)
 
@@ -50,7 +49,7 @@ class Benchmarker:
 
         any_failed = False
         # Run tests
-        self.log("Running Tests...", border='=')
+        log("Running Tests...", border='=')
 
         # build wrk and all databases needed for current run
         self.docker_helper.build_wrk()
@@ -59,7 +58,7 @@ class Benchmarker:
         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
             for test in self.tests:
-                self.log("Running Test: %s" % test.name, border='-')
+                log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
                     if not self.__run_test(test, benchmark_log):
                         any_failed = True
@@ -68,7 +67,7 @@ class Benchmarker:
 
         # Parse results
         if self.config.mode == "benchmark":
-            self.log("Parsing Results ...", border='=')
+            log("Parsing Results ...", border='=')
             self.results.parse(self.tests)
 
         self.results.set_completion_time()
@@ -78,7 +77,7 @@ class Benchmarker:
         return any_failed
 
     def stop(self, signal=None, frame=None):
-        self.log("Shutting down (may take a moment)")
+        log("Shutting down (may take a moment)")
         self.docker_helper.stop()
         sys.exit(0)
 
@@ -88,7 +87,7 @@ class Benchmarker:
 
     def __exit_test(self, success, prefix, file, message=None):
         if message:
-            self.log(message,
+            log(message,
                 prefix=prefix,
                 file=file,
                 color=Fore.RED if success else '')
@@ -173,7 +172,7 @@ class Benchmarker:
 
             # Debug mode blocks execution here until ctrl+c
             if self.config.mode == "debug":
-                self.log("Entering debug mode. Server has started. CTRL-c to stop.",
+                log("Entering debug mode. Server has started. CTRL-c to stop.",
                     prefix=log_prefix,
                     file=benchmark_log,
                     color=Fore.YELLOW)
@@ -181,14 +180,14 @@ class Benchmarker:
                 time.sleep(1)
 
             # Verify URLs and audit
-            self.log("Verifying framework URLs", prefix=log_prefix)
+            log("Verifying framework URLs", prefix=log_prefix)
             self.time_logger.mark_verify_start()
             passed_verify = test.verify_urls()
             self.audit.audit_test_dir(test.directory)
 
             # Benchmark this test
             if self.config.mode == "benchmark":
-                self.log("Benchmarking %s" % test.name,
+                log("Benchmarking %s" % test.name,
                     file=benchmark_log,
                     border='-')
                 self.time_logger.mark_benchmarking_start()
@@ -225,7 +224,7 @@ class Benchmarker:
             tb = traceback.format_exc()
             self.results.write_intermediate(test.name,
                                             "error during test: " + str(e))
-            self.log(tb, prefix=log_prefix, file=benchmark_log)
+            log(tb, prefix=log_prefix, file=benchmark_log)
             return self.__exit_test(
                 success=False,
                 message="Error during test: %s" % test.name,
@@ -241,7 +240,7 @@ class Benchmarker:
         '''
 
         def benchmark_type(test_type):
-            self.log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
+            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
 
             test = framework_test.runTests[test_type]
             raw_file = self.results.get_raw_file(framework_test.name,
@@ -269,13 +268,13 @@ class Benchmarker:
                 self.__end_logging()
 
             results = self.results.parse_test(framework_test, test_type)
-            self.log("Benchmark results:", file=benchmark_log)
+            log("Benchmark results:", file=benchmark_log)
             # TODO move into log somehow
             pprint(results)
 
             self.results.report_benchmark_results(framework_test, test_type,
                                                   results['results'])
-            self.log("Complete", file=benchmark_log)
+            log("Complete", file=benchmark_log)
 
         for test_type in framework_test.runTests:
             benchmark_type(test_type)
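
Note: the call sites in this diff assume a module-level log() helper exported by toolset/utils/output_helper.py (alongside FNULL) that accepts the keyword arguments used above: prefix, file, color, and border. The sketch below is inferred from those call sites only, not taken from the project's actual implementation; names, defaults, and formatting details are assumptions.

# Sketch only: the shape of a log() helper consistent with the call sites above.
# The real toolset/utils/output_helper.py may differ.
import os
import sys

FNULL = open(os.devnull, 'w')  # assumed: a null sink handed to subprocesses


def log(log_text='', prefix='', file=None, color='', border=''):
    # Optionally frame the message with a border line (e.g. '=' or '-').
    if border:
        rule = border * 80
        text = "%s\n%s%s\n%s" % (rule, prefix, log_text, rule)
    else:
        text = "%s%s" % (prefix, log_text)
    # Color (e.g. colorama's Fore.RED or Fore.YELLOW) applies to console output only.
    sys.stdout.write(color + text + "\n")
    sys.stdout.flush()
    # Mirror the uncolored text into the per-run benchmark.log when a file is given.
    if file is not None:
        file.write(text + "\n")
        file.flush()

With a helper of this shape, calls such as log("Running Tests...", border='=') no longer need a Benchmarker instance, which is the point of replacing self.log with the imported function.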