Browse Source

[ci skip] toolset: don't exit with an error status if the run "completes" (#3567)

The previous full run on Citrine exited with an error status code even
though it completed and was what we'd call a "successful" run.  It
probably ran into issues testing a couple of frameworks along the way,
which we assume that it logged, but the toolset didn't crash, and it
wasn't invoked with invalid arguments or anything.  So the command to
run the toolset should be considered a success.
Michael Hixson 7 years ago
parent
commit
9ece2b1a0b
2 changed files with 2 additions and 6 deletions
  1. +1 −4
      toolset/benchmark/benchmarker.py
  2. +1 −2
      toolset/run-tests.py

+ 1 - 4
toolset/benchmark/benchmarker.py

@@ -40,14 +40,13 @@ class Benchmarker:
         all_tests = gather_remaining_tests(self.config, self.results)
 
         # Run tests
-        success = True
         log("Running Tests...", border='=')
         with open(os.path.join(self.results.directory, 'benchmark.log'),
                   'w') as benchmark_log:
             for test in all_tests:
                 log("Running Test: %s" % test.name, border='-')
                 with self.config.quiet_out.enable():
-                    success = self.__run_test(test, benchmark_log) and success
+                    self.__run_test(test, benchmark_log)
                 # Load intermediate result from child process
                 self.results.load()
 
@@ -60,8 +59,6 @@ class Benchmarker:
         self.results.upload()
         self.results.finish()
 
-        return success
-
     ##########################################################################################
     # Private methods
     ##########################################################################################

+ 1 - 2
toolset/run-tests.py

@@ -235,8 +235,7 @@ def main(argv=None):
 
     else:
         benchmarker = Benchmarker(config, results)
-        if not benchmarker.run():
-            return 1
+        benchmarker.run()
 
     return 0