run-tests.py

import argparse
import socket
import sys
import signal
import traceback

from toolset.benchmark.benchmarker import Benchmarker
from toolset.utils.scaffolding import Scaffolding
from toolset.utils.audit import Audit
from toolset.utils import cleaner
from toolset.utils.benchmark_config import BenchmarkConfig
from toolset.utils.output_helper import log  # used by StoreSeqAction.parse_seq below

# Enable cross-platform colored output
from colorama import init, Fore

init()


class StoreSeqAction(argparse.Action):
    '''
    Helper class for parsing a sequence from the command line
    '''

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super(StoreSeqAction, self).__init__(
            option_strings, dest, type=str, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.parse_seq(values))

    def parse_seq(self, argument):
        result = argument.split(',')
        sequences = [x for x in result if ":" in x]
        for sequence in sequences:
            try:
                (start, step, end) = sequence.split(':')
            except ValueError:
                log(" Invalid: {!s}".format(sequence), color=Fore.RED)
                log(" Requires start:step:end, e.g. 1:2:10", color=Fore.RED)
                raise
            result.remove(sequence)
            # list() keeps the concatenation valid on Python 3, where range is lazy
            result = result + list(range(int(start), int(end), int(step)))
        return [abs(int(item)) for item in result]

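# Illustrative examples of StoreSeqAction.parse_seq, following the range
# semantics described in the parser epilog below:
#   parse_seq("5")      -> [5]
#   parse_seq("1,3,6")  -> [1, 3, 6]
#   parse_seq("1:3:15") -> [1, 4, 7, 10, 13]   # i.e. list(range(1, 15, 3))
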
###################################################################################################
# Main
###################################################################################################
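# Example invocations (illustrative; substitute a real test name, as reported
# by --list-tests, for <framework-test-name>):
#   python run-tests.py --list-tests
#   python run-tests.py --mode verify --test <framework-test-name>
#   python run-tests.py --mode benchmark --test <framework-test-name> --type json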
def main(argv=None):
    '''
    Runs the toolset.
    '''
    # Do argv default this way, as doing it in the functional declaration sets it at compile time
    if argv is None:
        argv = sys.argv

    ##########################################################
    # Set up argument parser
    ##########################################################
    parser = argparse.ArgumentParser(
        description="Install or run the Framework Benchmarks test suite.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=
        '''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms.
        Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those
        values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a
        list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while
        0:1:5 creates [0, 1, 2, 3, 4]
        ''')

    # Suite options
    parser.add_argument(
        '--audit',
        action='store_true',
        default=False,
        help='Audits framework tests for inconsistencies')
    parser.add_argument(
        '--clean',
        action='store_true',
        default=False,
        help='Removes the results directory')
    parser.add_argument(
        '--new',
        action='store_true',
        default=False,
        help='Initialize a new framework test')
    parser.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help=
        'Only print a limited set of messages to stdout, keep the bulk of messages in log files only'
    )
    parser.add_argument(
        '--results-name',
        help='Gives a name to this set of results, formatted as a date',
        default='(unspecified, datetime = %Y-%m-%d %H:%M:%S)')
    parser.add_argument(
        '--results-environment',
        help='Describes the environment in which these results were gathered',
        default='(unspecified, hostname = %s)' % socket.gethostname())
    parser.add_argument(
        '--results-upload-uri',
        default=None,
        help=
        'A URI where the in-progress results.json file will be POSTed periodically'
    )
    parser.add_argument(
        '--parse',
        help=
        'Parses the results of the given timestamp and merges that with the latest results'
    )

    # Test options
    parser.add_argument(
        '--test', default=None, nargs='+', help='names of tests to run')
    parser.add_argument(
        '--test-dir',
        nargs='+',
        dest='test_dir',
        help='name of framework directory containing all tests to run')
    parser.add_argument(
        '--test-lang',
        nargs='+',
        dest='test_lang',
        help='name of language directory containing all tests to run')
    parser.add_argument(
        '--exclude', default=None, nargs='+', help='names of tests to exclude')
    parser.add_argument(
        '--type',
        choices=[
            'all', 'json', 'db', 'query', 'cached_query', 'fortune', 'update',
            'plaintext'
        ],
        default='all',
        help='which type of test to run')
    parser.add_argument(
        '-m',
        '--mode',
        choices=['benchmark', 'verify', 'debug'],
        default='benchmark',
        help=
        'verify mode will only start up the tests, curl the urls and shutdown. debug mode will skip verification and leave the server running.'
    )
    parser.add_argument(
        '--list-tests',
        action='store_true',
        default=False,
        help='lists all the known tests that can run')

    # Benchmark options
    parser.add_argument(
        '--duration',
        default=15,
        help='Time in seconds that each test should run for.')
    parser.add_argument(
        '--server-host',
        default='tfb-server',
        help='Hostname/IP for application server')
    parser.add_argument(
        '--database-host',
        default='tfb-database',
        help='Hostname/IP for database server')
    parser.add_argument(
        '--client-host', default='', help='Hostname/IP for client server')
    parser.add_argument(
        '--concurrency-levels',
        nargs='+',
        default=[16, 32, 64, 128, 256, 512],
        help='List of concurrencies to benchmark')
    parser.add_argument(
        '--pipeline-concurrency-levels',
        nargs='+',
        default=[256, 1024, 4096, 16384],
        help='List of pipeline concurrencies to benchmark')
    parser.add_argument(
        '--query-levels',
        nargs='+',
        default=[1, 5, 10, 15, 20],
        help='List of query levels to benchmark')
    parser.add_argument(
        '--cached-query-levels',
        nargs='+',
        default=[1, 10, 20, 50, 100],
        help='List of cached query levels to benchmark')

    # Network options
    parser.add_argument(
        '--network-mode',
        default=None,
        help='The network mode to run docker in')

    args = parser.parse_args()

    config = BenchmarkConfig(args)
    benchmarker = Benchmarker(config)

    signal.signal(signal.SIGTERM, benchmarker.stop)
    signal.signal(signal.SIGINT, benchmarker.stop)

    try:
        if config.new:
            Scaffolding(benchmarker)

        elif config.audit:
            Audit(benchmarker).start_audit()

        elif config.clean:
            cleaner.clean(benchmarker.results)
            benchmarker.docker_helper.clean()

        elif config.list_tests:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                config.log(test.name)

        elif config.parse:
            all_tests = benchmarker.metadata.gather_tests()
            for test in all_tests:
                test.parse_all()
            benchmarker.results.parse(all_tests)

        else:
            any_failed = benchmarker.run()
            if config.mode == "verify":
                return any_failed
    except Exception:
        tb = traceback.format_exc()
        config.log("A fatal error has occurred", color=Fore.RED)
        config.log(tb)
        # try one last time to stop docker containers on fatal error
        try:
            benchmarker.stop()
        except:
            sys.exit(1)

    return 0


if __name__ == "__main__":
    sys.exit(main())