
Fixed a bug where values parsed from the benchmark config file were left as strings

msmith-techempower committed 10 years ago
commit 61b110c025
3 changed files with 14 additions and 7 deletions
  1. benchmark.cfg.example (+3 -3)
  2. toolset/benchmark/benchmarker.py (+1 -1)
  3. toolset/run-tests.py (+10 -3)

+ 3 - 3
benchmark.cfg.example

@@ -12,9 +12,9 @@ exclude=None
 install=server
 install_error_action=continue
 install_strategy=unified
-#install_only=True
-#list_test_metadata=True
-#list_tests=True
+install_only=False
+list_test_metadata=False
+list_tests=False
 concurrency_levels=[8, 16, 32, 64, 128, 256]
 query_levels=[1, 5,10,15,20]
 threads=8
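
Note (illustrative, not part of the diff): ConfigParser returns every value as a plain string, so an explicit install_only=False would previously have reached the code as the string 'False', which is truthy. A minimal sketch of that pitfall:

    # Hypothetical example: any non-empty string is truthy in Python.
    value = "False"              # what ConfigParser hands back for install_only=False
    if value:
        print("still runs")      # prints, because the string 'False' is truthy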

+ 1 - 1
toolset/benchmark/benchmarker.py

@@ -940,7 +940,7 @@ class Benchmarker:
 
 
     args['max_threads'] = args['threads']
-    args['max_concurrency'] = str(max(args['concurrency_levels']))
+    args['max_concurrency'] = max(args['concurrency_levels'])
 
     self.__dict__.update(args)
     # pprint(self.__dict__)
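
For context (an illustrative sketch, not part of the commit): when concurrency_levels came from the config file it used to arrive as a raw string, so max() picked the largest character and the str() wrapper masked the problem. With the literal_eval conversion added in run-tests.py below, the value is a real list and max() returns an int:

    from ast import literal_eval

    raw = "[8, 16, 32, 64, 128, 256]"     # what ConfigParser used to hand over
    max(raw)                              # ']' -- the largest character, not 256
    max(literal_eval(raw))                # 256, once the string is parsed into a list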

+ 10 - 3
toolset/run-tests.py

@@ -11,6 +11,7 @@ from pprint import pprint
 from benchmark.benchmarker import Benchmarker
 from setup.linux.unbuffered import Unbuffered
 from setup.linux import setup_util
+from ast import literal_eval
 
 # Enable cross-platform colored output
 from colorama import init
@@ -80,6 +81,12 @@ def main(argv=None):
             config = ConfigParser.SafeConfigParser()
             config.read([os.getcwd() + '/' + args.conf_file])
             defaults = dict(config.items("Defaults"))
+            # Convert strings into proper python types
+            for k,v in defaults.iteritems():
+                try:
+                    defaults[k] = literal_eval(v)
+                except:
+                    pass
     except IOError:
         if args.conf_file != 'benchmark.cfg':
             print 'Configuration file not found!'
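
Roughly what the new conversion loop does to the dict ConfigParser produces (illustrative values; the real keys come from benchmark.cfg, and this sketch uses Python 3 spelling -- items() and explicit exception types -- for clarity):

    from ast import literal_eval

    defaults = {
        'threads': '8',
        'install_only': 'False',
        'concurrency_levels': '[8, 16, 32, 64, 128, 256]',
        'install': 'server',                  # not a Python literal
    }
    for k, v in defaults.items():
        try:
            defaults[k] = literal_eval(v)     # 8, False, [8, 16, ...]
        except (ValueError, SyntaxError):
            pass                              # non-literals such as 'server' stay strings
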
@@ -185,13 +192,13 @@ def main(argv=None):
     # Run the benchmarker in the specified mode
     #   Do not use benchmarker variables for these checks, 
     #   they are either str or bool based on the python version
-    if (type(args.list_tests) is str and args.list_tests.lower() == 'true') or (type(args.list_tests) is bool and args.list_tests):
+    if args.list_tests:
       benchmarker.run_list_tests()
-    elif (type(args.list_test_metadata) is str and args.list_test_metadata.lower() == 'true') or (type(args.list_test_metadata) is bool and args.list_test_metadata):
+    elif args.list_test_metadata:
       benchmarker.run_list_test_metadata()
     elif args.parse != None:
       benchmarker.parse_timestamp()
-    elif not ((type(args.install_only) is str and args.install_only.lower() == 'true') or (type(args.install_only) is bool and args.install_only)):
+    elif not args.install_only:
       return benchmarker.run()
 
 if __name__ == "__main__":
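
For reference (a hypothetical before/after, not part of the commit): the old guard existed because the flags could be either the string 'True'/'False' from the config or a bool from argparse; once literal_eval normalizes config values to real booleans, a bare truthiness check is safe:

    # Old guard vs. new check, shown for one flag.
    old = lambda x: (type(x) is str and x.lower() == 'true') or (type(x) is bool and x)

    old('False'), old(False)       # (False, False) -- needed while config gave strings
    bool('False'), bool(False)     # (True, False)  -- why a bare `if x:` was unsafe before
    # After literal_eval, args.list_tests is always a bool, so `if args.list_tests:` suffices.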