
Modify results.json to replace warning array with verify array

Hamilton Turner 11 years ago
parent
commit
bff6039899
2 changed files with 51 additions and 59 deletions
  1. toolset/benchmark/benchmarker.py (+23 −39)
  2. toolset/benchmark/framework_test.py (+28 −20)
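
Before the diffs, a minimal sketch (not part of the commit) of how the top-level results structure changes: the six per-test 'warning' lists are dropped, and a 'verify' dict keyed by framework name takes their place, mapping each test type to 'pass', 'warn', or 'fail'. The framework name below is made up for illustration.

    # Before this commit: one warning list per test type.
    results_before = {
        'warning': {
            'json': ['example-framework'],
            'db': [], 'query': [], 'fortune': [], 'update': [], 'plaintext': [],
        },
    }

    # After this commit: outcomes keyed by framework name, then by test type,
    # as written by Benchmarker.report_verify_results().
    results_after = {
        'verify': {
            'example-framework': {
                'json': 'pass',
                'db': 'warn',
                'fortune': 'fail',
            },
        },
    }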

+ 23 - 39
toolset/benchmark/benchmarker.py

@@ -203,32 +203,6 @@ class Benchmarker:
   # End output_file
   ############################################################
 
-  ############################################################
-  # get_warning_file(test_name, test_type)
-  # returns the output file name for this test_name and 
-  # test_type timestamp/test_type/test_name/raw 
-  ############################################################
-  def get_warning_file(self, test_name, test_type):
-    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
-  ############################################################
-  # End get_warning_file
-  ############################################################
-
-  ############################################################
-  # warning_file(test_name, test_type)
-  # returns the warning file for this test_name and test_type
-  # timestamp/test_type/test_name/raw 
-  ############################################################
-  def warning_file(self, test_name, test_type):
-    path = self.get_warning_file(test_name, test_type)
-    try:
-      os.makedirs(os.path.dirname(path))
-    except OSError:
-      pass
-    return path
-  ############################################################
-  # End warning_file
-  ############################################################
 
   ############################################################
   # get_stats_file(test_name, test_type)
@@ -286,22 +260,35 @@ class Benchmarker:
     return path
 
   ############################################################
-  # report_results
+  # report_verify_results
+  # Used by FrameworkTest to add verification details to our results
+  #
+  # TODO: Technically this is an IPC violation - we are accessing
+  # the parent process' memory from the child process
+  ############################################################
+  def report_verify_results(self, framework, test, result):
+    if framework.name not in self.results['verify'].keys():
+      self.results['verify'][framework.name] = dict()
+    self.results['verify'][framework.name][test] = result
+
+  ############################################################
+  # report_benchmark_results
+  # Used by FrameworkTest to add benchmark data to our results
+  #
+  # TODO: Technically this is an IPC violation - we are accessing
+  # the parent process' memory from the child process
   ############################################################
-  def report_results(self, framework, test, results):
+  def report_benchmark_results(self, framework, test, results):
     if test not in self.results['rawData'].keys():
       self.results['rawData'][test] = dict()
 
     # If the parse produced any results, then it succeeded.
     if results:
       self.results['rawData'][test][framework.name] = results
+
       # This may already be set for single-tests
       if framework.name not in self.results['succeeded'][test]:
         self.results['succeeded'][test].append(framework.name)
-      # Add this type
-      if (os.path.exists(self.get_warning_file(framework.name, test)) and
-          framework.name not in self.results['warning'][test]):
-        self.results['warning'][test].append(framework.name)
     else:
       # This may already be set for single-tests
       if framework.name not in self.results['failed'][test]:
@@ -804,6 +791,9 @@ class Benchmarker:
   # __finish
   ############################################################
   def __finish(self):
+    if self.mode == "verify":
+      print json.dumps(self.results, indent=2, sort_keys=True)
+
     print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
     print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
 
@@ -921,13 +911,7 @@ class Benchmarker:
       self.results['failed']['fortune'] = []
       self.results['failed']['update'] = []
       self.results['failed']['plaintext'] = []
-      self.results['warning'] = dict()
-      self.results['warning']['json'] = []
-      self.results['warning']['db'] = []
-      self.results['warning']['query'] = []
-      self.results['warning']['fortune'] = []
-      self.results['warning']['update'] = []
-      self.results['warning']['plaintext'] = []
+      self.results['verify'] = dict()
     else:
       #for x in self.__gather_tests():
       #  if x.name not in self.results['frameworks']:
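
The TODO comments on report_verify_results and report_benchmark_results point out that a child process is writing into the parent's results dict. A standalone sketch of why that matters in Python's multiprocessing model, not taken from this repository (the names below are made up): a plain dict mutated in a child process is not visible back in the parent.

    from multiprocessing import Process

    results = {'verify': {}}

    def child_report():
        # Mirrors what report_verify_results does, but from a child process.
        results['verify']['example-framework'] = {'json': 'pass'}
        print('child sees: %s' % results)

    if __name__ == '__main__':
        p = Process(target=child_report)
        p.start()
        p.join()
        # The child's write never crosses the process boundary;
        # this still prints {'verify': {}}.
        print('parent sees: %s' % results)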

+ 28 - 20
toolset/benchmark/framework_test.py

@@ -541,9 +541,11 @@ class FrameworkTest:
       if ret_tuple[0]:
         self.json_url_passed = True
         out.write("PASS\n\n")
+        self.benchmarker.report_verify_results(self, self.JSON, 'pass')
       else:
         self.json_url_passed = False
         out.write("\nFAIL" + ret_tuple[1] + "\n\n")
+        self.benchmarker.report_verify_results(self, self.JSON, 'fail')
         result = False
       out.flush()
 
@@ -564,14 +566,16 @@ class FrameworkTest:
         self.db_url_warn = False
       else:
         self.db_url_warn = True
-
       out.write("VALIDATING DB ... ")
       if self.db_url_passed:
         out.write("PASS")
+        self.benchmarker.report_verify_results(self, self.DB, 'pass')
         if self.db_url_warn:
           out.write(" (with warnings) " + validate_strict_ret_tuple[1])
+          self.benchmarker.report_verify_results(self, self.DB, 'warn')
         out.write("\n\n")
       else:
+        self.benchmarker.report_verify_results(self, self.DB, 'fail')
         out.write("\nFAIL" + validate_ret_tuple[1])
         result = False
       out.flush()
@@ -630,11 +634,14 @@ class FrameworkTest:
       out.write("VALIDATING QUERY ... ")
       if self.query_url_passed:
         out.write("PASS")
+        self.benchmarker.report_verify_results(self, self.QUERY, 'pass')
         if self.query_url_warn:
           out.write(" (with warnings)")
+          self.benchmarker.report_verify_results(self, self.QUERY, 'warn')
         out.write("\n\n")
       else:
         out.write("\nFAIL " + ret_tuple[1] + "\n\n")
+        self.benchmarker.report_verify_results(self, self.QUERY, 'fail')
         result = False
       out.flush()
 
@@ -649,9 +656,11 @@ class FrameworkTest:
       if self.validateFortune(output, out, err):
         self.fortune_url_passed = True
         out.write("PASS\n\n")
+        self.benchmarker.report_verify_results(self, self.FORTUNE, 'pass')
       else:
         self.fortune_url_passed = False
         out.write("\nFAIL\n\n")
+        self.benchmarker.report_verify_results(self, self.FORTUNE, 'fail')
         result = False
       out.flush()
 
@@ -667,9 +676,11 @@ class FrameworkTest:
       if ret_tuple[0]:
         self.update_url_passed = True
         out.write("PASS\n\n")
+        self.benchmarker.report_verify_results(self, self.UPDATE, 'pass')
       else:
         self.update_url_passed = False
         out.write("\nFAIL " + ret_tuple[1] + "\n\n")
+        self.benchmarker.report_verify_results(self, self.UPDATE, 'fail')
         result = False
       out.flush()
 
@@ -685,9 +696,11 @@ class FrameworkTest:
       if ret_tuple[0]:
         self.plaintext_url_passed = True
         out.write("PASS\n\n")
+        self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'pass')
       else:
         self.plaintext_url_passed = False
         out.write("\nFAIL\n\n" + ret_tuple[1] + "\n\n")
+        self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'fail')
         result = False
       out.flush()
 
@@ -747,7 +760,7 @@ class FrameworkTest:
           self.__end_logging()
         results = self.__parse_test(self.JSON)
         print results
-        self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
         out.write( "Complete\n" )
         out.flush()
       except AttributeError:
@@ -760,21 +773,18 @@ class FrameworkTest:
         out.flush()
         results = None
         output_file = self.benchmarker.output_file(self.name, self.DB)
-        warning_file = self.benchmarker.warning_file(self.name, self.DB)
         if not os.path.exists(output_file):
           with open(output_file, 'w'):
             # Simply opening the file in write mode should create the empty file.
             pass
-        if self.db_url_warn:
-          with open(warning_file, 'w'):
-            pass
         if self.db_url_passed:
+          self.benchmarker.report_verify_results(self, self.DB, 'pass')
           remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
           self.__begin_logging(self.DB)
           self.__run_benchmark(remote_script, output_file, err)
           self.__end_logging()
         results = self.__parse_test(self.DB)
-        self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
         out.write( "Complete\n" )
       except AttributeError:
         pass
@@ -786,21 +796,19 @@ class FrameworkTest:
         out.flush()
         results = None
         output_file = self.benchmarker.output_file(self.name, self.QUERY)
-        warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
         if not os.path.exists(output_file):
           with open(output_file, 'w'):
             # Simply opening the file in write mode should create the empty file.
             pass
         if self.query_url_warn:
-          with open(warning_file, 'w'):
-            pass
+          self.benchmarker.report_verify_results(framework=self, test=self.QUERY, result='warn')
         if self.query_url_passed:
           remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
           self.__begin_logging(self.QUERY)
           self.__run_benchmark(remote_script, output_file, err)
           self.__end_logging()
         results = self.__parse_test(self.QUERY)
-        self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
         out.write( "Complete\n" )
         out.flush()
       except AttributeError:
@@ -823,7 +831,7 @@ class FrameworkTest:
           self.__run_benchmark(remote_script, output_file, err)
           self.__end_logging()
         results = self.__parse_test(self.FORTUNE)
-        self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
         out.write( "Complete\n" )
         out.flush()
       except AttributeError:
@@ -846,7 +854,7 @@ class FrameworkTest:
           self.__run_benchmark(remote_script, output_file, err)
           self.__end_logging()
         results = self.__parse_test(self.UPDATE)
-        self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
         out.write( "Complete\n" )
         out.flush()
       except AttributeError:
@@ -869,7 +877,7 @@ class FrameworkTest:
           self.__run_benchmark(remote_script, output_file, err)
           self.__end_logging()
         results = self.__parse_test(self.PLAINTEXT)
-        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
+        self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
         out.write( "Complete\n" )
         out.flush()
       except AttributeError:
@@ -888,32 +896,32 @@ class FrameworkTest:
     # JSON
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
       results = self.__parse_test(self.JSON)
-      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
     
     # DB
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
       results = self.__parse_test(self.DB)
-      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
     
     # Query
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
       results = self.__parse_test(self.QUERY)
-      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
 
     # Fortune
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
       results = self.__parse_test(self.FORTUNE)
-      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
 
     # Update
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
       results = self.__parse_test(self.UPDATE)
-      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
 
     # Plaintext
     if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
       results = self.__parse_test(self.PLAINTEXT)
-      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
+      self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
   ############################################################
   # End parse_all
   ############################################################
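
As a usage note, a hedged sketch of reading the new 'verify' section back out of results.json; the file location below is an assumption, since __finish only reports that results are saved under result_directory/timestamp.

    import json

    # Assumed path; substitute the actual run directory reported by __finish.
    with open('path/to/results.json') as f:
        results = json.load(f)

    for framework, tests in sorted(results.get('verify', {}).items()):
        summary = ', '.join('%s=%s' % (test, outcome)
                            for test, outcome in sorted(tests.items()))
        print('%s: %s' % (framework, summary))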