
Refactor benchmark_configs

This is a work in progress.

1. Fix output for --list-test-metadata.
2. Fix reference: ssh_string -> client_ssh_string.
3. Print stack traces on errors during test runs.
4. Fix argument: --application-os -> --os.
Michael Hixson 12 years ago
parent
commit d4a23083e3

+ 20 - 3
toolset/benchmark/benchmarker.py

@@ -37,12 +37,29 @@ class Benchmarker:
   ############################################################
   def run_list_test_metadata(self):
     all_tests = self.__gather_tests()
+    all_tests_json = json.dumps(map(lambda test: {
+      "name": test.name,
+      "approach": test.approach,
+      "classification": test.classification,
+      "database": test.database,
+      "framework": test.framework,
+      "language": test.language,
+      "orm": test.orm,
+      "platform": test.platform,
+      "webserver": test.webserver,
+      "os": test.os,
+      "database_os": test.database_os,
+      "display_name": test.display_name,
+      "notes": test.notes,
+      "versus": test.versus
+    }, all_tests))
 
     with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
-      f.write(json.dumps(all_tests))
+      f.write(all_tests_json)
 
     self.__finish()
 
+
   ############################################################
   # End run_list_test_metadata
   ############################################################
@@ -326,8 +343,8 @@ class Benchmarker:
     for test in tests:
       if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
         # the operating system requirements of this test for the
-		# application server or the database server don't match
-		# our current environment
+        # application server or the database server don't match
+        # our current environment
         continue
       
       # If the user specified which tests to run, then 
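
A note on the metadata dump above: it relies on Python 2 semantics, where map() returns a plain list that json.dumps can serialize directly; on Python 3 the same call would hand json.dumps a map iterator and fail. A minimal, version-agnostic sketch of the same serialization using a list comprehension (the field names come from the hunk above, trimmed here for brevity; the helper name is hypothetical):

import json

def dump_test_metadata(all_tests):
  # A list comprehension serializes identically on Python 2 and 3,
  # unlike map(), which returns a non-serializable iterator on Python 3.
  return json.dumps([{
    "name": test.name,
    "framework": test.framework,
    "language": test.language,
    "os": test.os,
    "database_os": test.database_os,
  } for test in all_tests])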

+ 8 - 1
toolset/benchmark/framework_test.py

@@ -5,6 +5,7 @@ import time
 import re
 import pprint
 import sys
+import traceback
 
 class FrameworkTest:
   ##########################################################################################
@@ -249,6 +250,7 @@ class FrameworkTest:
 
         print "Complete"
     except AttributeError:
+      traceback.print_exc()
       pass
 
     # Query
@@ -262,6 +264,7 @@ class FrameworkTest:
         self.benchmarker.report_results(framework=self, test="query", results=results['results'])
         print "Complete"
     except AttributeError:
+      traceback.print_exc()
       pass
 
     # fortune
@@ -275,6 +278,7 @@ class FrameworkTest:
         self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
         print "Complete"
     except AttributeError:
+      traceback.print_exc()
       pass
 
     # update
@@ -288,6 +292,7 @@ class FrameworkTest:
         self.benchmarker.report_results(framework=self, test="update", results=results['results'])
         print "Complete"
     except AttributeError:
+      traceback.print_exc()
       pass
 
     # plaintext
@@ -301,6 +306,7 @@ class FrameworkTest:
         self.benchmarker.report_results(framework=self, test="plaintext", results=results['results'])
         print "Complete"
     except AttributeError:
+      traceback.print_exc()
       pass
   ############################################################
   # End benchmark
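
Each benchmark type above runs inside an except AttributeError handler that previously swallowed failures silently; the added traceback.print_exc() calls keep the run going while recording where each failure came from. A minimal sketch of the pattern in isolation (run_one and benchmark_query are hypothetical stand-ins for the per-test calls):

import traceback

def run_one(test):
  try:
    test.benchmark_query()  # hypothetical per-test benchmark call
    print "Complete"
  except AttributeError:
    # Print the stack trace but carry on with the remaining tests,
    # matching the behavior this commit adds.
    traceback.print_exc()
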
@@ -449,7 +455,8 @@ class FrameworkTest:
   ############################################################
   def __run_benchmark(self, script, output_file):
     with open(output_file, 'w') as raw_file:
-      p = subprocess.Popen(self.benchmarker.ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
+	  
+      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
       p.communicate(script)
   ############################################################
   # End __run_benchmark
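
The benchmark script is piped over SSH and must run on the client machine, so the Popen call has to use the client's connection string; ssh_string pointed at the wrong host, hence the rename to client_ssh_string. A minimal sketch of the pattern with a placeholder connection string (in the real code it comes from the Benchmarker configuration):

import subprocess

def run_remote_benchmark(script, output_file, client_ssh_string="ssh tfb@client-host"):
  # Stream the generated benchmark script to the client host over ssh,
  # capturing stdout and stderr in the raw results file.
  with open(output_file, 'w') as raw_file:
    p = subprocess.Popen(client_ssh_string.split(" "),
                         stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
    p.communicate(script)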

+ 1 - 1
toolset/run-tests.py

@@ -47,7 +47,7 @@ parser.add_argument('--starting-concurrency', default=8, type=int)
 parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
 parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
 parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
-parser.add_argument('--application-os', choices=['linux', 'windows'], default='linux', help='The operating system of the application server.')
+parser.add_argument('--os', choices=['linux', 'windows'], default='linux', help='The operating system of the application server.')
 parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux', help='The operating system of the database server.')
 args = parser.parse_args()
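
Renaming --application-os to --os makes the flag parallel to --database-os and match the attribute the Benchmarker filters on (test.os versus self.os in the benchmarker.py hunk above). A small standalone argparse sketch of the resulting pair:

import argparse

parser = argparse.ArgumentParser()
# The two flags now mirror each other: --os for the application server,
# --database-os for the database server.
parser.add_argument('--os', choices=['linux', 'windows'], default='linux')
parser.add_argument('--database-os', choices=['linux', 'windows'], default='linux')

args = parser.parse_args(['--os', 'windows'])
print args.os           # 'windows'
print args.database_os  # 'linux' (argparse maps dashes to underscores)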