Add a few new fields to results.json (#2585)

(Tracked internally as 51323)

The new fields are:
  name
  environmentDescription
  completionTime
  uuid

The name and environmentDescription fields are configurable via two new
configuration parameters:
  results_name
  results_environment

The results_name parameter is treated as a date format string.
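
For example (values illustrative), the configured name is expanded with
strftime when results.json is first created:

    from datetime import datetime

    results_name = 'My Benchmarking Run %Y-%m-%d %H:%M:%S'
    # Rendered once at results-creation time, e.g.
    # "My Benchmarking Run 2017-03-15 09:30:00"
    name = datetime.now().strftime(results_name)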
Michael Hixson 8 years ago
commit 37fdb1cb1e
3 changed files with 20 additions and 8 deletions:
  1. benchmark.cfg.example (+3 -1)
  2. toolset/benchmark/benchmarker.py (+13 -7)
  3. toolset/run-tests.py (+4 -0)

benchmark.cfg.example (+3 -1)

@@ -25,4 +25,6 @@ test=None
 type=all
 verbose=True
 clean=False
-clean_all=False
+clean_all=False
+#results_name=My Benchmarking Run %%Y-%%m-%%d %%H:%%M:%%S
+#results_environment=My Server Environment
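
Note: the doubled percent signs are ConfigParser escapes; when the value
is read back, each %% becomes a literal %, leaving a plain strftime
format string. A minimal sketch of reading the value (the benchmark.cfg
path and the [Defaults] section name are assumptions here):

    import ConfigParser  # Python 2, matching run-tests.py below

    config = ConfigParser.ConfigParser()
    config.read('benchmark.cfg')
    # Returns "My Benchmarking Run %Y-%m-%d %H:%M:%S", ready for strftime.
    name_format = config.get('Defaults', 'results_name')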

toolset/benchmark/benchmarker.py (+13 -7)

@@ -8,6 +8,7 @@ from utils import gather_frameworks
 from utils import verify_database_connections
 
 import os
+import uuid
 import shutil
 import stat
 import json
@@ -143,6 +144,7 @@ class Benchmarker:
             self.__parse_results(all_tests)
 
 
+        self.__set_completion_time()
         self.__finish()
         return result
 
@@ -883,20 +885,20 @@ class Benchmarker:
     # End __count_commits
     ############################################################
 
-    ############################################################
-    # __write_intermediate_results
-    ############################################################
     def __write_intermediate_results(self,test_name,status_message):
+        self.results["completed"][test_name] = status_message
+        self.__write_results()
+
+    def __write_results(self):
         try:
-            self.results["completed"][test_name] = status_message
             with open(os.path.join(self.full_results_directory(), 'results.json'), 'w') as f:
                 f.write(json.dumps(self.results, indent=2))
         except (IOError):
             logging.error("Error writing results.json")
 
-    ############################################################
-    # End __write_intermediate_results
-    ############################################################
+    def __set_completion_time(self):
+        self.results['completionTime'] = int(round(time.time() * 1000))
+        self.__write_results()
 
     def __load_results(self):
         try:
@@ -1015,6 +1017,10 @@ class Benchmarker:
 
         if self.results == None:
             self.results = dict()
+            self.results['uuid'] = str(uuid.uuid4())
+            self.results['name'] = datetime.now().strftime(self.results_name)
+            self.results['environmentDescription'] = self.results_environment
+            self.results['completionTime'] = None
             self.results['concurrencyLevels'] = self.concurrency_levels
             self.results['queryIntervals'] = self.query_levels
             self.results['frameworks'] = [t.name for t in self.__gather_tests]
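
Taken together, a freshly initialized results dict now carries the four
new fields; a minimal sketch of the resulting results.json content
(values illustrative, existing fields omitted):

    import json
    import time
    import uuid
    from datetime import datetime

    results = dict()
    results['uuid'] = str(uuid.uuid4())
    results['name'] = datetime.now().strftime('My Benchmarking Run %Y-%m-%d %H:%M:%S')
    results['environmentDescription'] = 'My Server Environment'
    results['completionTime'] = None
    # __set_completion_time later overwrites the placeholder with epoch
    # milliseconds, e.g. 1489571415000:
    results['completionTime'] = int(round(time.time() * 1000))
    print(json.dumps(results, indent=2))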

toolset/run-tests.py (+4 -0)

@@ -1,7 +1,9 @@
 #!/usr/bin/env python
 import argparse
 import ConfigParser
+import socket
 import sys
+import time
 import os
 import platform
 import multiprocessing
@@ -154,6 +156,8 @@ def main(argv=None):
     parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
 
     # Misc Options
+    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
+    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
     parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
     parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
     parser.add_argument('--clear-tmp', action='store_true', default=False, help='Clears files written to /tmp after each framework\'s tests complete.')
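
With the new flags, a run can label its results from the command line
(values illustrative):

    toolset/run-tests.py --results-name "Nightly %Y-%m-%d" \
        --results-environment "My Server Environment"

If either flag is omitted, the defaults make the gap explicit: the name
falls back to a string noting the run datetime, and the environment
falls back to one noting the hostname.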