Browse Source

automatically generate metadata when a test is run

Keith Newman 9 years ago
parent
commit
b2cbfee6b8
1 changed file with 5 additions and 3 deletions
  1. 5 3
      toolset/benchmark/benchmarker.py

+ 5 - 3
toolset/benchmark/benchmarker.py

@@ -79,8 +79,6 @@ class Benchmarker:
    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)
 
 
-    self.__finish()
-
 
 
  ############################################################
  # End run_list_test_metadata
@@ -112,12 +110,15 @@ class Benchmarker:
  # running benchmarks against them.
  ############################################################
  def run(self):
+    ##########################
+    # Generate metadata
+    ##########################
+    self.run_list_test_metadata()
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests
-
    ##########################
    # Setup client/server
    ##########################
@@ -143,6 +144,7 @@ class Benchmarker:
      print header("Parsing Results ...", top='=', bottom='=')
      self.__parse_results(all_tests)
 
 
+
    self.__finish()
    return result