run-tests.py

import argparse
import socket
import sys
import signal
import traceback

from toolset.benchmark.benchmarker import Benchmarker
from toolset.utils.scaffolding import Scaffolding
from toolset.utils.audit import Audit
from toolset.utils.benchmark_config import BenchmarkConfig
from toolset.utils.output_helper import log

# Enable cross-platform colored output
from colorama import Fore, just_fix_windows_console

just_fix_windows_console()


class StoreSeqAction(argparse.Action):
    '''
    Helper class for parsing a sequence from the command line
    '''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(
            option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))
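    # parse_seq turns a comma-separated argument into a list of ints. Plain
    # items pass through; any item containing ':' is expanded as a
    # start:step:end range (end excluded, per Python's range semantics).
    # For example, "1,3,5:2:11" expands to [1, 3, 5, 7, 9].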
    def parse_seq(self, argument):
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                log("  Invalid: {!s}".format(sequence), color=Fore.RED)
                log("  Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                raise
            result.remove(sequence)
            # range() is not a list in Python 3; materialize it before concatenating
            result = result + list(range(int(start), int(end), int(step)))
        return [abs(int(item)) for item in result]
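
# StoreSeqAction is not wired to any flag below; a minimal sketch of how it
# could be attached to one (the '--levels' flag here is purely illustrative):
#
#   parser.add_argument('--levels', action=StoreSeqAction, default=[1, 2, 3],
#                       help='levels to benchmark (type int-sequence)')
#   parser.parse_args(['--levels', '0:1:5']).levels  # -> [0, 1, 2, 3, 4]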


###################################################################################################
# Main
###################################################################################################


def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Default argv here rather than in the function declaration, where the
    # default would be evaluated only once, at definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')
    # Suite options
    # CPU set options
    parser.add_argument(
        '--cpuset-cpus',
        default=None,
        help='The cpu set to run framework container on')
    parser.add_argument(
        '--audit',
        action='store_true',
        default=False,
        help='Audits framework tests for inconsistencies')
    parser.add_argument(
        '--new',
        action='store_true',
        default=False,
        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--reverse-order',
        action='store_true',
        default=False,
        help='Run the tests in reverse order, starting with the last test in the list'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help='A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help='Parses the results of the given timestamp and merges that with the latest results'
    )
    # Test options
    parser.add_argument(
        '--test', default=None, nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument(
        '--tag',
        nargs='+',
        dest='tag',
        help='tests to be run by tag name')
    parser.add_argument(
        '--exclude', default=None, nargs='+', help='names of tests to exclude')
    parser.add_argument(
        '--type',
        choices=[
            'all', 'json', 'db', 'query', 'cached-query', 'fortune', 'update',
            'plaintext'
        ],
        nargs='+',
        default='all',
        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help='verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument(
        '--list-tests',
        action='store_true',
        default=False,
        help='lists all the known tests that can run')
    parser.add_argument(
        '--list-tag',
        dest='list_tag',
        default=False,
        help='lists all the known tests with a specific tag')
    # Benchmark options
    parser.add_argument(
        '--duration',
        default=15,
        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--server-host',
        default='tfb-server',
        help='Hostname/IP for application server')
    parser.add_argument(
        '--database-host',
        default='tfb-database',
        help='Hostname/IP for database server')
    parser.add_argument(
        '--client-host', default='', help='Hostname/IP for client server')
    parser.add_argument(
        '--concurrency-levels',
        nargs='+',
        type=int,
        default=[16, 32, 64, 128, 256, 512],
        help='List of concurrencies to benchmark')
    parser.add_argument(
        '--pipeline-concurrency-levels',
        nargs='+',
        default=[256, 1024, 4096, 16384],
        help='List of pipeline concurrencies to benchmark')
    parser.add_argument(
        '--query-levels',
        nargs='+',
        default=[1, 5, 10, 15, 20],
        help='List of query levels to benchmark')
    parser.add_argument(
        '--cached-query-levels',
        nargs='+',
        default=[1, 10, 20, 50, 100],
        help='List of cached query levels to benchmark')
    parser.add_argument(
        '--test-container-memory',
        default=None,
        help='Amount of memory to be given to the test container')
    parser.add_argument(
        '--extra-docker-runtime-args',
        nargs='*',
        default=None,
        help='Extra docker arguments to be passed to the test container')

    # Network options
    parser.add_argument(
        '--network-mode',
        default=None,
        help='The network mode to run docker in')
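
    # Typical invocations combining the flags above (test names and values
    # here are illustrative only):
    #   python run-tests.py --mode verify --test gemini
    #   python run-tests.py --type json plaintext --duration 30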

    args = parser.parse_args()

    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)
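
    # Route SIGTERM/SIGINT through Benchmarker.stop so in-flight Docker
    # containers are torn down when the run is interrupted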
    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        if config.new:
            Scaffolding(benchmarker)

        elif config.audit:
            Audit(benchmarker).start_audit()

        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                log(test.name)

        elif config.list_tag:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                if hasattr(test, "tags") and config.list_tag in test.tags:
                    log(test.name)

        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                test.parse_all()
            benchmarker.results.parse(all_tests)

        else:
            any_failed = benchmarker.run()
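            # In verify mode the return value becomes the process exit code,
            # so a nonzero status signals verification failures to callers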
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        log("A fatal error has occurred", color=Fore.RED)
        log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        except Exception:
            sys.exit(1)

    return 0


if __name__ == "__main__":
    sys.exit(main())