benchmarker.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302
  1. from toolset.utils.output_helper import log, FNULL
  2. from toolset.utils.docker_helper import DockerHelper
  3. from toolset.utils.time_logger import TimeLogger
  4. from toolset.utils.metadata import Metadata
  5. from toolset.utils.results import Results
  6. from toolset.utils.audit import Audit
  7. import os
  8. import subprocess
  9. import traceback
  10. import sys
  11. import time
  12. import shlex
  13. from pprint import pprint
  14. from colorama import Fore
  15. class Benchmarker:
  16. def __init__(self, config):
  17. '''
  18. Initialize the benchmarker.
  19. '''
  20. self.config = config
  21. self.time_logger = TimeLogger()
  22. self.metadata = Metadata(self)
  23. self.audit = Audit(self)
  24. # a list of all tests for this run
  25. self.tests = self.metadata.tests_to_run()
  26. self.results = Results(self)
  27. self.docker_helper = DockerHelper(self)
  28. ##########################################################################################
  29. # Public methods
  30. ##########################################################################################
  31. def run(self):
  32. '''
  33. This process involves setting up the client/server machines
  34. with any necessary change. Then going through each test,
  35. running their docker build and run, verifying the URLs, and
  36. running benchmarks against them.
  37. '''
  38. # Generate metadata
  39. self.metadata.list_test_metadata()
  40. any_failed = False
  41. # Run tests
  42. log("Running Tests...", border='=')
  43. # build wrk and all databases needed for current run
  44. self.docker_helper.build_wrk()
  45. self.docker_helper.build_databases()
  46. with open(os.path.join(self.results.directory, 'benchmark.log'),
  47. 'w') as benchmark_log:
  48. for test in self.tests:
  49. log("Running Test: %s" % test.name, border='-')
  50. with self.config.quiet_out.enable():
  51. if not self.__run_test(test, benchmark_log):
  52. any_failed = True
  53. # Load intermediate result from child process
  54. self.results.load()
  55. # Parse results
  56. if self.config.mode == "benchmark":
  57. log("Parsing Results ...", border='=')
  58. self.results.parse(self.tests)
  59. self.results.set_completion_time()
  60. self.results.upload()
  61. self.results.finish()
  62. return any_failed
  63. def stop(self, signal=None, frame=None):
  64. log("Shutting down (may take a moment)")
  65. self.docker_helper.stop()
  66. sys.exit(0)
  67. ##########################################################################################
  68. # Private methods
  69. ##########################################################################################
  70. def __exit_test(self, success, prefix, file, message=None):
  71. if message:
  72. log(message,
  73. prefix=prefix,
  74. file=file,
  75. color=Fore.RED if success else '')
  76. self.time_logger.log_test_end(log_prefix=prefix, file=file)
  77. return success
def __run_test(self, test, benchmark_log):
    '''
    Runs the given test, verifies that the webapp is accepting requests,
    optionally benchmarks the webapp, and ultimately stops all services
    started for this test.

    Returns the success flag produced by __exit_test (False on any
    skip/error path, True when the test completed).
    '''
    log_prefix = "%s: " % test.name
    # Start timing the total test duration
    self.time_logger.mark_test_start()

    # If the test is in the excludes list, we skip it
    if self.config.exclude and test.name in self.config.exclude:
        message = "Test {name} has been added to the excludes list. Skipping.".format(
            name=test.name)
        self.results.write_intermediate(test.name, message)
        return self.__exit_test(
            success=False,
            message=message,
            prefix=log_prefix,
            file=benchmark_log)

    database_container = None
    try:
        # Start database container; a database value of "none" means the
        # test needs no database at all.
        if test.database.lower() != "none":
            self.time_logger.mark_starting_database()
            database_container = self.docker_helper.start_database(
                test.database.lower())
            if database_container is None:
                message = "ERROR: Problem building/running database container"
                return self.__exit_test(
                    success=False,
                    message=message,
                    prefix=log_prefix,
                    file=benchmark_log)
            self.time_logger.mark_started_database()

        # Start webapp
        container = test.start()
        self.time_logger.mark_test_starting()
        if container is None:
            # NOTE(review): container is None here; docker_helper.stop
            # presumably tolerates None entries — confirm.
            self.docker_helper.stop([container, database_container])
            message = "ERROR: Problem starting {name}".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(
                success=False,
                message=message,
                prefix=log_prefix,
                file=benchmark_log)

        # Poll (up to 60s, once per second) until the webapp accepts
        # requests; bail out early if its container dies.
        slept = 0
        max_sleep = 60
        accepting_requests = False
        while not accepting_requests and slept < max_sleep:
            if not self.docker_helper.server_container_exists(
                    container.id):
                break
            accepting_requests = test.is_accepting_requests()
            time.sleep(1)
            slept += 1

        if not accepting_requests:
            self.docker_helper.stop([container, database_container])
            message = "ERROR: Framework is not accepting requests from client machine"
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(
                success=False,
                message=message,
                prefix=log_prefix,
                file=benchmark_log)
        self.time_logger.mark_test_accepting_requests()

        # Debug mode blocks execution here until ctrl+c; the stop()
        # signal handler performs the container teardown in that case.
        if self.config.mode == "debug":
            log("Entering debug mode. Server has started. CTRL-c to stop.",
                prefix=log_prefix,
                file=benchmark_log,
                color=Fore.YELLOW)
            while True:
                time.sleep(1)

        # Verify URLs and audit
        log("Verifying framework URLs", prefix=log_prefix)
        self.time_logger.mark_verify_start()
        passed_verify = test.verify_urls()
        self.audit.audit_test_dir(test.directory)

        # Benchmark this test
        if self.config.mode == "benchmark":
            log("Benchmarking %s" % test.name,
                file=benchmark_log,
                border='-')
            self.time_logger.mark_benchmarking_start()
            self.__benchmark(test, benchmark_log)
            self.time_logger.log_benchmarking_end(
                log_prefix=log_prefix, file=benchmark_log)

        # Log test timing stats
        self.time_logger.log_build_flush(benchmark_log)
        self.time_logger.log_database_start_time(log_prefix, benchmark_log)
        self.time_logger.log_test_accepting_requests(
            log_prefix, benchmark_log)
        self.time_logger.log_verify_end(log_prefix, benchmark_log)

        # Stop this test
        self.docker_helper.stop([container, database_container])

        # Save results thus far into the latest results directory
        self.results.write_intermediate(test.name,
                                        time.strftime(
                                            "%Y%m%d%H%M%S",
                                            time.localtime()))

        # Upload the results thus far to another server (optional)
        self.results.upload()

        # A verify failure only fails the run in verify mode; benchmark
        # mode intentionally proceeds regardless.
        if self.config.mode == "verify" and not passed_verify:
            return self.__exit_test(
                success=False,
                message="Failed verify!",
                prefix=log_prefix,
                file=benchmark_log)
    except Exception as e:
        # Record the failure in the intermediate results and the log,
        # then fall through to the common failure exit.
        tb = traceback.format_exc()
        self.results.write_intermediate(test.name,
                                        "error during test: " + str(e))
        log(tb, prefix=log_prefix, file=benchmark_log)
        return self.__exit_test(
            success=False,
            message="Error during test: %s" % test.name,
            prefix=log_prefix,
            file=benchmark_log)

    return self.__exit_test(
        success=True, prefix=log_prefix, file=benchmark_log)
  200. def __benchmark(self, framework_test, benchmark_log):
  201. '''
  202. Runs the benchmark for each type of test that it implements
  203. '''
  204. def benchmark_type(test_type):
  205. log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
  206. test = framework_test.runTests[test_type]
  207. raw_file = self.results.get_raw_file(framework_test.name,
  208. test_type)
  209. if not os.path.exists(raw_file):
  210. # Open to create the empty file
  211. with open(raw_file, 'w'):
  212. pass
  213. if not test.failed:
  214. # Begin resource usage metrics collection
  215. self.__begin_logging(framework_test, test_type)
  216. script = self.config.types[test_type].get_script_name()
  217. script_variables = self.config.types[
  218. test_type].get_script_variables(
  219. test.name, "http://%s:%s%s" % (self.config.server_host,
  220. framework_test.port,
  221. test.get_url()))
  222. self.docker_helper.benchmark(script, script_variables,
  223. raw_file)
  224. # End resource usage metrics collection
  225. self.__end_logging()
  226. results = self.results.parse_test(framework_test, test_type)
  227. log("Benchmark results:", file=benchmark_log)
  228. # TODO move into log somehow
  229. pprint(results)
  230. self.results.report_benchmark_results(framework_test, test_type,
  231. results['results'])
  232. log("Complete", file=benchmark_log)
  233. for test_type in framework_test.runTests:
  234. benchmark_type(test_type)
  235. def __begin_logging(self, framework_test, test_type):
  236. '''
  237. Starts a thread to monitor the resource usage, to be synced with the
  238. client's time.
  239. TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  240. '''
  241. output_file = "{file_name}".format(
  242. file_name=self.results.get_stats_file(framework_test.name,
  243. test_type))
  244. dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
  245. --raw --socket --tcp --udp --unix --vm --disk-util \
  246. --rpc --rpcd --output {output_file}".format(
  247. output_file=output_file)
  248. cmd = shlex.split(dstat_string)
  249. self.subprocess_handle = subprocess.Popen(
  250. cmd, stdout=FNULL, stderr=subprocess.STDOUT)
  251. def __end_logging(self):
  252. '''
  253. Stops the logger thread and blocks until shutdown is complete.
  254. '''
  255. self.subprocess_handle.terminate()
  256. self.subprocess_handle.communicate()