run-tests.py

import argparse
import socket
import sys
import signal
import traceback

from toolset.benchmark.benchmarker import Benchmarker
from toolset.utils.scaffolding import Scaffolding
from toolset.utils.audit import Audit
from toolset.utils import cleaner
from toolset.utils.benchmark_config import BenchmarkConfig
from toolset.utils.output_helper import log

# Enable cross-platform colored output
from colorama import init, Fore

init()
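# colorama's init() wraps stdout/stderr so the ANSI color codes emitted via
# Fore below also render correctly on Windows consoles.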

class StoreSeqAction(argparse.Action):
    '''
    Helper class for parsing a sequence from the command line
    '''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(
            option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, argument):
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                log(" Invalid: {!s}".format(sequence), color=Fore.RED)
                log(" Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                raise
            result.remove(sequence)
            # list() is required on Python 3, where range() returns a lazy
            # sequence that cannot be concatenated directly onto a list.
            result = result + list(range(int(start), int(end), int(step)))
        return [abs(int(item)) for item in result]
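
# For example, parse_seq("3,1:3:10") yields [3, 1, 4, 7]: comma-separated
# items are kept as-is, while start:step:end triples expand with Python's
# range() semantics (1:3:10 -> 1, 4, 7).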

###################################################################################################
# Main
###################################################################################################
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Do argv default this way, as doing it in the function declaration
    # binds it at definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts
integer lists in multiple forms. Using a single number e.g. 5 will create a
list [5]. Using commas will create a list containing those values
e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of
start:step:end will create a list, using the semantics of python's range
function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 0:1:5 creates
[0, 1, 2, 3, 4].
''')
    # Suite options
    parser.add_argument(
        '--audit',
        action='store_true',
        default=False,
        help='Audits framework tests for inconsistencies')
    parser.add_argument(
        '--clean',
        action='store_true',
        default=False,
        help='Removes the results directory')
    parser.add_argument(
        '--new',
        action='store_true',
        default=False,
        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help='Only print a limited set of messages to stdout, keep the bulk '
        'of messages in log files only')
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help='A URI where the in-progress results.json file will be POSTed '
        'periodically')
    parser.add_argument(
        '--parse',
        help='Parses the results of the given timestamp and merges that '
        'with the latest results')

    # Test options
    parser.add_argument(
        '--test', default=None, nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument(
        '--exclude', default=None, nargs='+', help='names of tests to exclude')
    parser.add_argument(
        '--type',
        choices=[
            'all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update',
            'plaintext'
        ],
        default='all',
        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help='verify mode will only start up the tests, curl the urls and '
        'shutdown. debug mode will skip verification and leave the server '
        'running.')
    parser.add_argument(
        '--list-tests',
        action='store_true',
        default=False,
        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument(
        '--duration',
        default=15,
        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--server-host',
        default='tfb-server',
        help='Hostname/IP for application server')
    parser.add_argument(
        '--database-host',
        default='tfb-database',
        help='Hostname/IP for database server')
    parser.add_argument(
        '--client-host', default='', help='Hostname/IP for client server')
    parser.add_argument(
        '--concurrency-levels',
        nargs='+',
        default=[16, 32, 64, 128, 256, 512],
        help='List of concurrencies to benchmark')
    parser.add_argument(
        '--pipeline-concurrency-levels',
        nargs='+',
        default=[256, 1024, 4096, 16384],
        help='List of pipeline concurrencies to benchmark')
    parser.add_argument(
        '--query-levels',
        nargs='+',
        default=[1, 5, 10, 15, 20],
        help='List of query levels to benchmark')
    parser.add_argument(
        '--cached-query-levels',
        nargs='+',
        default=[1, 10, 20, 50, 100],
        help='List of cached query levels to benchmark')
    parser.add_argument(
        '--benchmark-env',
        default='none',
        help='Name of the benchmark environment these results were '
        'gathered in')

    # Network options
    parser.add_argument(
        '--network-mode',
        default=None,
        help='The network mode to run docker in')

    args = parser.parse_args()
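
    # All parsed flags are folded into a single BenchmarkConfig object, so
    # the rest of the toolset reads settings from one place rather than
    # touching argparse directly.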
    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)
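
    # Route SIGTERM and Ctrl+C (SIGINT) to benchmarker.stop so containers
    # are shut down even when a run is interrupted.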
    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        if config.new:
            Scaffolding(benchmarker)
        elif config.audit:
            Audit(benchmarker).start_audit()
        elif config.clean:
            cleaner.clean(benchmarker.results)
            benchmarker.docker_helper.clean()
        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                log(test.name)
        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                test.parse_all()
            benchmarker.results.parse(all_tests)
        else:
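            # In verify mode the failure status from the run is propagated as
            # the process exit code, so callers (e.g. CI) can detect
            # verification failures.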
            any_failed = benchmarker.run()
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        log("A fatal error has occurred", color=Fore.RED)
        log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        except:
            sys.exit(1)

    return 0

if __name__ == "__main__":
    sys.exit(main())
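
# Example invocations (the test name is illustrative; run --list-tests to see
# the real set):
#
#   ./run-tests.py --list-tests
#   ./run-tests.py --mode verify --test some-framework-test
#   ./run-tests.py --mode benchmark --type json --duration 30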