
Misc toolset improvements (#3380)

* Attempt to fix continuous results uploads

* Remove "current_benchmark" toolset feature

It's been a source of frustration for me in the continuous benchmarking
environments.

* Propagate error codes from docker in framework_test.start

If a framework doesn't have a dockerfile, for example, the toolset now
recognizes that framework as a failure immediately rather than wasting
time trying to run tests against it (waiting 60 seconds, then trying to
access its URLs).
Michael Hixson 7 years ago
parent commit de865023ba

+ 0 - 8
toolset/benchmark/benchmarker.py

@@ -294,9 +294,6 @@ class Benchmarker:
         if self.config.os.lower() == 'windows':
             logging.debug("Executing __run_tests on Windows")
             for test in tests:
-                with open(self.config.current_benchmark,
-                          'w') as benchmark_resume_file:
-                    benchmark_resume_file.write(test.name)
                 with self.config.quiet_out.enable():
                     if self.__run_test(test) != 0:
                         error_happened = True
@@ -306,9 +303,6 @@ class Benchmarker:
             # These features do not work on Windows
             for test in tests:
                 print(header("Running Test: %s" % test.name))
-                with open(self.config.current_benchmark,
-                          'w') as benchmark_resume_file:
-                    benchmark_resume_file.write(test.name)
                 with self.config.quiet_out.enable():
                     test_process = Process(
                         target=self.__run_test,
@@ -331,8 +325,6 @@ class Benchmarker:
                 if test_process.exitcode != 0:
                     error_happened = True
 
-        if os.path.isfile(self.config.current_benchmark):
-            os.remove(self.config.current_benchmark)
         logging.debug("End __run_tests.")
 
         if error_happened:

+ 4 - 4
toolset/benchmark/framework_test.py

@@ -65,11 +65,11 @@ class FrameworkTest:
                 raise Exception(
                     "docker_files in benchmark_config.json must be an array")
 
-        docker_helper.build(self.benchmarker_config, [self.name], out)
+        result = docker_helper.build(self.benchmarker_config, [self.name], out)
+        if result != 0:
+            return result
 
-        docker_helper.run(self.benchmarker_config, test_docker_files, out)
-
-        return 0
+        return docker_helper.run(self.benchmarker_config, test_docker_files, out)
 
     def verify_urls(self, logPath):
         '''
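
The effect of the start() change above is that docker's own exit status now flows up to the benchmarker: a framework whose image cannot even be built is failed immediately instead of being polled for a minute. Below is a minimal, self-contained sketch of the same pattern; the names (build_image, run_image, "example/app") are illustrative rather than the toolset's real API, and it assumes the docker CLI is on the PATH.

```python
import subprocess

def build_image(tag, context_dir):
    # Stand-in for docker_helper.build: return docker's exit code instead of
    # discarding it (non-zero if, for example, the dockerfile is missing).
    return subprocess.run(["docker", "build", "-t", tag, context_dir]).returncode

def run_image(tag):
    # Stand-in for docker_helper.run.
    return subprocess.run(["docker", "run", "-d", tag]).returncode

def start(tag, context_dir):
    # Same contract as the new framework_test.start: stop at the first failure
    # and hand the non-zero exit code back to the caller.
    code = build_image(tag, context_dir)
    if code != 0:
        return code
    return run_image(tag)

if __name__ == "__main__":
    # A nonexistent build context makes `docker build` exit non-zero, so the
    # caller can fail the framework right away instead of polling its URLs.
    print(start("example/app", "./no-such-dir"))
```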

+ 0 - 3
toolset/utils/benchmark_config.py

@@ -67,9 +67,6 @@ class BenchmarkConfig:
         # Remember root directory
         self.fwroot = setup_util.get_fwroot()
 
-        # setup current_benchmark.txt location
-        self.current_benchmark = "/tmp/current_benchmark.txt"
-
         if hasattr(self, 'parse') and self.parse != None:
             self.timestamp = self.parse
         else:

+ 4 - 0
toolset/utils/docker_helper.py

@@ -118,6 +118,8 @@ def build(benchmarker_config, test_names, out):
                 print(e)
                 return 1
 
+    return 0
+
 
 def run(benchmarker_config, docker_files, out):
     '''
@@ -157,6 +159,8 @@ def run(benchmarker_config, docker_files, out):
             print(e)
             return 1
 
+    return 0
+
 
 def stop(config, database_container_id, test, out):
     '''

+ 1 - 12
toolset/utils/metadata_helper.py

@@ -154,18 +154,7 @@ def gather_remaining_tests(config, results):
     '''
     Gathers the tests remaining in a current benchmark run.
     '''
-    tests = gather_tests(config.test, config.exclude, config, results)
-
-    # If the tests have been interrupted somehow, then we want to resume them where we left
-    # off, rather than starting from the beginning
-    if os.path.isfile(config.current_benchmark):
-        with open(config.current_benchmark, 'r') as interrupted_benchmark:
-            interrupt_bench = interrupted_benchmark.read().strip()
-        for index, atest in enumerate(tests):
-            if atest.name == interrupt_bench:
-                tests = tests[index:]
-                break
-    return tests
+    return gather_tests(config.test, config.exclude, config, results)
 
 
 def gather_frameworks(include=[], exclude=[], config=None):

+ 1 - 1
toolset/utils/results_helper.py

@@ -221,7 +221,7 @@ class Results:
                 requests.post(
                     self.config.results_upload_uri,
                     headers={'Content-Type': 'application/json'},
-                    data=json.dumps(self, indent=2))
+                    data=json.dumps(self.__to_jsonable(), indent=2))
             except (Exception):
                 logging.error("Error uploading results.json")
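
For context on the upload fix above: json.dumps cannot serialize an arbitrary object, so data=json.dumps(self, ...) would most likely raise a TypeError that the surrounding except then swallowed, meaning the POST never went out. Serializing a plain-dict view first avoids that. The sketch below uses a hypothetical, stripped-down Results stand-in and an illustrative to_jsonable method, not the toolset's actual implementation.

```python
import json

class Results(object):
    """Hypothetical, stripped-down stand-in for the toolset's Results class."""

    def __init__(self, config):
        self.config = config      # the real class holds non-serializable members
        self.succeeded = {}
        self.failed = {}

    def to_jsonable(self):
        # Plain-dict view of the interesting fields, analogous in spirit to the
        # private __to_jsonable() call used in the diff above.
        return {"succeeded": self.succeeded, "failed": self.failed}

results = Results(config=object())
try:
    json.dumps(results, indent=2)                   # old path: raises TypeError
except TypeError as exc:
    print("upload would have been skipped:", exc)

print(json.dumps(results.to_jsonable(), indent=2))  # new path: serializes cleanly
```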