#!/usr/bin/env python
import argparse
import ConfigParser
import socket
import sys
import time
import os
import platform
import multiprocessing
import itertools
import copy

from benchmark.benchmarker import Benchmarker
from setup.linux.unbuffered import Unbuffered
from setup.linux import setup_util
from scaffolding import Scaffolding
from ast import literal_eval

# Enable cross-platform colored output
from colorama import init
init()

class StoreSeqAction(argparse.Action):
    '''Helper class for parsing a sequence from the command line'''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, argument):
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                print(" Invalid: {!s}".format(sequence))
                print(" Requires start:step:end, e.g. 1:2:10")
                raise
            result.remove(sequence)
            result = result + range(int(start), int(end), int(step))
        return [abs(int(item)) for item in result]
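
# Example, traced from the logic above: given "1,3,5:2:11", parse_seq returns
# [1, 3, 5, 7, 9] -- the plain items survive the split on ',' while the
# colon item "5:2:11" expands via range(5, 11, 2).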

###################################################################################################
# Main
###################################################################################################


def main(argv=None):
    '''Runs the program. There are three ways to pass arguments:
    1) environment variables TFB_*
    2) configuration file benchmark.cfg
    3) command line flags
    In terms of precedence, 3 > 2 > 1: the config file trumps environment
    variables, but command line flags have the final say.
    '''
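    # For example, a 60-second run could be requested three ways (TFB_DURATION
    # is a hypothetical instance of the TFB_* naming; only the config file and
    # the command line are actually parsed in this script):
    #   1) TFB_DURATION=60 in the environment
    #   2) duration=60 under [Defaults] in benchmark.cfg
    #   3) --duration 60 on the command line (wins over 1 and 2)
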
    # Default argv here rather than in the function signature; a default
    # argument is evaluated once, at function definition time.
    if argv is None:
        argv = sys.argv

    # Enable unbuffered output so messages will appear in the proper order
    # with subprocess output.
    sys.stdout = Unbuffered(sys.stdout)

    # Update the python path:
    # 1) Ensure the current directory (which should be the benchmark home
    #    directory) is in the path so that the tests can be imported.
    sys.path.append('.')
    # 2) Ensure toolset/setup/linux is in the path so that the tests can
    #    "import setup_util".
    sys.path.append('toolset/setup/linux')

    # Update the environment for shell scripts
    os.environ['FWROOT'] = setup_util.get_fwroot()
    os.environ['IROOT'] = os.environ['FWROOT'] + '/installs'
    # e.g. 'Ubuntu', '14.04', 'trusty' respectively
    os.environ['TFB_DISTRIB_ID'], os.environ['TFB_DISTRIB_RELEASE'], os.environ['TFB_DISTRIB_CODENAME'] = platform.linux_distribution()
    # App server cpu count
    os.environ['CPU_COUNT'] = str(multiprocessing.cpu_count())

    print("FWROOT is {!s}.".format(os.environ['FWROOT']))
    conf_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument(
        '--conf_file', default='benchmark.cfg', metavar='FILE',
        help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.')
    args, remaining_argv = conf_parser.parse_known_args()

    defaults = {}
    try:
        if not os.path.exists(os.path.join(os.environ['FWROOT'], args.conf_file)) and \
                not os.path.exists(os.path.join(os.environ['FWROOT'], 'benchmark.cfg')):
            print("No config file found. Aborting!")
            exit(1)
        with open(os.path.join(os.environ['FWROOT'], args.conf_file)):
            config = ConfigParser.SafeConfigParser()
            config.read([os.path.join(os.environ['FWROOT'], args.conf_file)])
            defaults.update(dict(config.items("Defaults")))
            # Convert strings into proper python types
            for k, v in defaults.iteritems():
                try:
                    defaults[k] = literal_eval(v)
                except Exception:
                    pass
    except IOError:
        print("Configuration file not found!")
        exit(1)
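
    # A minimal benchmark.cfg, for illustration (the [Defaults] section name
    # and the required keys come from the checks below; the values here are
    # placeholders):
    #   [Defaults]
    #   client_user=tfb
    #   client_host=10.0.0.1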

    ##########################################################
    # Set up default values
    ##########################################################

    # Verify and massage options
    if defaults['client_user'] is None or defaults['client_host'] is None:
        print("client_user and client_host are required!")
        print("Please check your configuration file.")
        print("Aborting!")
        exit(1)

    if defaults['database_user'] is None:
        defaults['database_user'] = defaults['client_user']
    if defaults['database_host'] is None:
        defaults['database_host'] = defaults['client_host']
    if defaults['server_host'] is None:
        defaults['server_host'] = defaults['client_host']
    if defaults['ulimit'] is None:
        defaults['ulimit'] = 200000

    os.environ['ULIMIT'] = str(defaults['ulimit'])
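
    # ULIMIT (exported just above), like FWROOT and IROOT, is consumed by
    # shell scripts elsewhere in the toolset; presumably something along the
    # lines of "ulimit -n $ULIMIT" to raise the open-file limit before a run.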

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        parents=[conf_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4].
        ''')
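
    # The "(type int-sequence)" options described in the epilog are wired up
    # with StoreSeqAction (defined above); a sketch, with a hypothetical
    # option name:
    #   parser.add_argument('--some-levels', action=StoreSeqAction,
    #                       default=[8], help='... (type int-sequence)')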

    # Install options
    parser.add_argument('--clean', action='store_true', default=False, help='Removes the results directory')
    parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories')
    parser.add_argument('--new', action='store_true', default=False, help='Initialize a new framework test')

    # Test options
    parser.add_argument('--test', nargs='+', help='names of tests to run')
    parser.add_argument('--test-dir', nargs='+', dest='test_dir', help='name of framework directory containing all tests to run')
    parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
    parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
    parser.add_argument('-m', '--mode', choices=['benchmark', 'verify', 'debug'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.')
    parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument('--duration', default=15, help='Time in seconds that each test should run for.')
    parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')

    # Misc options
    parser.add_argument('--results-name', help='Gives a name to this set of results, formatted as a date', default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument('--results-environment', help='Describes the environment in which these results were gathered', default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument('--results-upload-uri', default=None, help='A URI where the in-progress results.json file will be POSTed periodically')
    parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Causes the configuration to print before any other commands are executed.')
    parser.add_argument('--quiet', action='store_true', default=False, help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only')

    # Must call set_defaults after the add_argument calls, or each option's
    # hardcoded default would override the configuration file default.
    parser.set_defaults(**defaults)
    args = parser.parse_args(remaining_argv)

    if args.new:
        Scaffolding()
        return 0

    benchmarker = Benchmarker(vars(args))

    # Run the benchmarker in the specified mode. Do not use benchmarker
    # variables for these checks; they are either str or bool based on the
    # python version.
    if args.list_tests:
        benchmarker.run_list_tests()
    elif args.parse is not None:
        benchmarker.parse_timestamp()
    else:
        return benchmarker.run()
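
# Typical invocations, for illustration (the framework name is hypothetical;
# the flags are defined above):
#   ./run-tests.py --list-tests
#   ./run-tests.py --mode verify --test some-framework
#   ./run-tests.py --type json --duration 60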

if __name__ == "__main__":
    sys.exit(main())