
Create static gather_tests utility method

Hamilton Turner committed 11 years ago
parent commit f34af1d395
2 changed files with 108 additions and 29 deletions
  1. benchmark.cfg.example (+29, -29)
  2. toolset/benchmark/utils.py (+79, -0)

+ 29 - 29
benchmark.cfg.example

@@ -1,31 +1,31 @@
 [Defaults]
 # Available Keys: 
-# client_host='localhost'
-# client_identity_file=None
-# client_user=None
-# database_host=None
-# database_identity_file=None
-# database_os='linux'
-# database_user=None
-# duration=60
-# exclude=None
-# install='all'
-# install_error_action='continue'
-# install_software=False
-# list_test_metadata=False
-# list_tests=False
-# max_concurrency=256
-# max_queries=20
-# max_threads=8
-# mode='benchmark'
-# name='ec2'
-# os='linux'
-# parse=None
-# password_prompt=False
-# query_interval=5
-# server_host='localhost'
-# sleep=60
-# starting_concurrency=8
-# test=None
-# type='all'
-# verbose=True
+client_host=localhost
+client_identity_file=None
+client_user=localhost
+database_host=localhost
+database_identity_file=None
+database_os=linux
+database_user=tfb
+duration=60
+exclude=None
+install=server
+install_error_action=continue
+install_strategy=unified
+list_test_metadata=False
+list_tests=False
+max_concurrency=256
+max_queries=20
+max_threads=8
+mode=benchmark
+name=ec2
+os=linux
+parse=None
+password_prompt=False
+query_interval=5
+server_host=localhost
+sleep=60
+starting_concurrency=8
+test=None
+type=all
+verbose=True
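
The example config now ships active, unquoted values instead of commented-out keys. When these defaults are loaded (see the gather_tests helper added below), each value is passed through ast.literal_eval, so None, numbers, and booleans become real Python objects, while bare words such as linux or unified are not valid literals and stay plain strings. A minimal sketch of that coercion, using a hypothetical coerce_value helper:

    from ast import literal_eval

    def coerce_value(value):
        # Turn config strings into Python objects where possible; bare
        # words such as 'linux' or 'unified' are not valid literals and
        # are kept as plain strings, mirroring the loader's behaviour.
        try:
            return literal_eval(value)
        except (ValueError, SyntaxError):
            return value

    coerce_value('60')      # 60 (int)
    coerce_value('None')    # None
    coerce_value('True')    # True
    coerce_value('linux')   # 'linux' (left as a string)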

+ 79 - 0
toolset/benchmark/utils.py

@@ -1,3 +1,82 @@
+import ConfigParser
+import os
+import glob
+import json
+
+from ast import literal_eval
+
+def gather_tests(include=[], exclude=[], benchmarker=None):
+  '''
+  Given test names as strings, returns a list of FrameworkTest objects.
+  For example, 'aspnet-mysql-raw' becomes a FrameworkTest object with
+  attributes such as the test directory, the test's database OS, and
+  other useful items.
+
+  With no arguments, every test in this framework will be returned.  
+  With include, only tests whose names appear in include are returned.
+  With exclude, every test except those named in exclude is returned.
+
+  A benchmarker is needed to construct full FrameworkTest objects. If
+  one is not provided, a default Benchmarker will be created. 
+  '''
+
+  # Avoid setting up a circular import
+  from benchmark import framework_test
+  from benchmark.benchmarker import Benchmarker
+  from setup.linux import setup_util
+
+  # Help callers out a bit
+  if include is None:
+    include = []
+  if exclude is None:
+    exclude = []
+  
+  # Setup default Benchmarker using example configuration
+  if benchmarker is None:
+    default_config = setup_util.get_fwroot() + "/benchmark.cfg.example"
+    config = ConfigParser.SafeConfigParser()
+    config.readfp(open(default_config))
+    defaults = dict(config.items("Defaults"))
+    
+    # Convert strings into proper python types
+    for k,v in defaults.iteritems():
+      try:
+        defaults[k] = literal_eval(v)
+      except (ValueError, SyntaxError):
+        pass  # leave bare strings (e.g. linux) as-is
+
+    # Ensure we only run the __init__ method of Benchmarker
+    defaults['install'] = None
+    
+    benchmarker = Benchmarker(defaults)
+
+  # Assume we are running from FrameworkBenchmarks
+  config_files = glob.glob('*/benchmark_config')
+
+  tests = []
+  for config_file_name in config_files:
+    config = None
+    with open(config_file_name, 'r') as config_file:
+      try:
+        config = json.load(config_file)
+      except:
+        # User-friendly errors
+        print("Error loading '%s'." % config_file_name)
+        raise
+
+    # Find all tests in the config file
+    config_tests = framework_test.parse_config(config, 
+      os.path.dirname(config_file_name), benchmarker)
+    
+    # Filter
+    for test in config_tests:
+      if test.name in exclude:
+        continue
+      elif len(include) == 0 or test.name in include:
+        tests.append(test)
+
+  tests.sort(key=lambda x: x.name)
+  return tests
 
 def header(message, top='-', bottom='-'):
     '''
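
As a quick usage sketch of the new helper (assuming toolset/ is on the Python path, so the module imports as benchmark.utils, and that the working directory is the FrameworkBenchmarks root, since the function globs '*/benchmark_config' relative to it; the test name is the docstring's example):

    from benchmark.utils import gather_tests

    # Every known test, backed by a default Benchmarker built from
    # benchmark.cfg.example.
    all_tests = gather_tests()

    # Only the named test(s), skipping everything else.
    subset = gather_tests(include=['aspnet-mysql-raw'])

    for test in subset:
        print(test.name)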