import argparse
import socket
import sys
import signal

from toolset.benchmark.benchmarker import Benchmarker
from toolset.utils.scaffolding import Scaffolding
from toolset.utils import cleaner
from toolset.utils.results_helper import Results
from toolset.utils.benchmark_config import BenchmarkConfig
from toolset.utils import docker_helper
from toolset.utils.metadata_helper import gather_tests
from toolset.utils.output_helper import log

# Enable cross-platform colored output
from colorama import init, Fore
init()

# Required to be globally known
config = None


class StoreSeqAction(argparse.Action):
    '''
    Helper class for parsing a sequence from the command line
    '''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(
            option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, argument):
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                log(" Invalid: {!s}".format(sequence), color=Fore.RED)
                log(" Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                raise
            result.remove(sequence)
            # Wrap range() in list() so it can be concatenated with the
            # remaining string entries (a bare range() is not a list in Python 3)
            result = result + list(range(int(start), int(end), int(step)))
        return [abs(int(item)) for item in result]
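
# A worked example of the expansion above (the input string is hypothetical):
#
#   StoreSeqAction(option_strings=[], dest='seq').parse_seq("1,3:2:10")
#   # "1,3:2:10" -> ["1", "3:2:10"] -> ["1"] + range(3, 10, 2) -> [1, 3, 5, 7, 9]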


# Stop any containers the toolset has started before exiting on SIGTERM/SIGINT
def __stop(signalno, frame):
    log("Shutting down (may take a moment)")
    docker_helper.stop(config)
    sys.exit(0)


signal.signal(signal.SIGTERM, __stop)
signal.signal(signal.SIGINT, __stop)


###################################################################################################
# Main
###################################################################################################
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Default argv here rather than in the function signature; a signature
    # default would be evaluated only once, at function definition time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')
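
    # For reference, an int-sequence flag would be wired to StoreSeqAction like
    # this (illustrative only; '--example-levels' is not part of this parser):
    #
    #   parser.add_argument(
    #       '--example-levels',
    #       action=StoreSeqAction,
    #       default=[1, 2, 3],
    #       help='An example int-sequence argument (type int-sequence)')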

    # Suite options
    parser.add_argument(
        '--build',
        nargs='+',
        help='Builds the dockerfile(s) for the given test(s)')
    parser.add_argument(
        '--clean',
        action='store_true',
        default=False,
        help='Removes the results directory')
    parser.add_argument(
        '--new',
        action='store_true',
        default=False,
        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help='Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help='A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help='Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument(
        '--test', default=None, nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument(
        '--exclude', default=None, nargs='+', help='names of tests to exclude')
    parser.add_argument(
        '--type',
        choices=[
            'all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update',
            'plaintext'
        ],
        default='all',
        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help='verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument(
        '--list-tests',
        action='store_true',
        default=False,
        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument(
        '--duration',
        default=15,
        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--server-host',
        default='tfb-server',
        help='Hostname/IP for application server')
    parser.add_argument(
        '--database-host',
        default='tfb-database',
        help='Hostname/IP for database server')
    parser.add_argument(
        '--client-host', default='', help='Hostname/IP for client server')
    parser.add_argument(
        '--concurrency-levels',
        nargs='+',
        default=[16, 32, 64, 128, 256, 512],
        help='List of concurrencies to benchmark')
    parser.add_argument(
        '--pipeline-concurrency-levels',
        nargs='+',
        default=[256, 1024, 4096, 16384],
        help='List of pipeline concurrencies to benchmark')
    parser.add_argument(
        '--query-levels',
        nargs='+',
        default=[1, 5, 10, 15, 20],
        help='List of query levels to benchmark')
    parser.add_argument(
        '--cached-query-levels',
        nargs='+',
        default=[1, 10, 20, 50, 100],
        help='List of cached query levels to benchmark')

    # Network options
    parser.add_argument(
        '--network-mode',
        default=None,
        help='The network mode to run docker in')

    args = parser.parse_args()

    global config
    config = BenchmarkConfig(args)
    results = Results(config)

    if config.new:
        Scaffolding(config)

    elif config.build:
        docker_helper.build(config, config.build)

    elif config.clean:
        cleaner.clean(results)
        docker_helper.clean(config)

    elif config.list_tests:
        all_tests = gather_tests(benchmarker_config=config)
        for test in all_tests:
            log(test.name)

    elif config.parse is not None:
        # TODO: broken
        all_tests = gather_tests(benchmarker_config=config)
        for test in all_tests:
            test.parse_all()
        results.parse(all_tests)

    else:
        benchmarker = Benchmarker(config, results)
        benchmarker.run()

    return 0


if __name__ == "__main__":
    sys.exit(main())
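
# Typical invocations (framework/test names are examples; use --list-tests to
# see what is available in a given checkout):
#
#   python run-tests.py --list-tests
#   python run-tests.py --mode verify --test gemini
#   python run-tests.py --mode benchmark --test-lang Python --duration 30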