# benchmarker.py

from toolset.utils.output_helper import log, FNULL
from toolset.utils.docker_helper import DockerHelper
from toolset.utils.time_logger import TimeLogger
from toolset.utils.metadata import Metadata
from toolset.utils.results import Results
from toolset.utils.audit import Audit

import os
import subprocess
import traceback
import sys
import time
import shlex
from pprint import pprint
from colorama import Fore
import numbers


class Benchmarker:
    def __init__(self, config):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.time_logger = TimeLogger()
        self.metadata = Metadata(self)
        self.audit = Audit(self)

        # a list of all tests for this run
        self.tests = self.metadata.tests_to_run()

        self.results = Results(self)
        self.docker_helper = DockerHelper(self)

    ##########################################################################################
    # Public methods
    ##########################################################################################
    def run(self):
        '''
        Set up the client/server machines with any necessary changes, then
        go through each test: run its docker build and run, verify its URLs,
        and run benchmarks against it.
        '''
        # Generate metadata
        self.metadata.list_test_metadata()

        any_failed = False

        # Run tests
        log("Running Tests...", border='=')

        # Build wrk and all databases needed for the current run
        self.docker_helper.build_wrk()
        self.docker_helper.build_databases()

        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in self.tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    if not self.__run_test(test, benchmark_log):
                        any_failed = True

                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(self.tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()

        return any_failed
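    # Typical driver flow (illustrative sketch only; the real entry point and
    # the `config` object live in the surrounding toolset, not in this file):
    #
    #   benchmarker = Benchmarker(config)
    #   signal.signal(signal.SIGTERM, benchmarker.stop)
    #   sys.exit(1 if benchmarker.run() else 0)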
    def stop(self, signal=None, frame=None):
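        '''
        Shut down all running containers and exit. The (signal, frame)
        parameters let this method double as a signal handler.
        '''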
  65. log("Shutting down (may take a moment)")
  66. self.docker_helper.stop()
  67. sys.exit(0)
    ##########################################################################################
    # Private methods
    ##########################################################################################

    def __exit_test(self, success, prefix, file, message=None):
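        '''
        Log an optional final message for the test (in red when the test
        failed), record the test end time, and return the success flag.
        '''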
        if message:
            # Error/skip messages are highlighted in red; success messages
            # keep the default color.
            log(message,
                prefix=prefix,
                file=file,
                color=Fore.RED if not success else '')
        self.time_logger.log_test_end(log_prefix=prefix, file=file)
        return success
    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''
        log_prefix = "%s: " % test.name

        # Start timing the total test duration
        self.time_logger.mark_test_start()

        # If the test is in the excludes list, we skip it
        if self.config.exclude and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            return self.__exit_test(
                success=False,
                message=message,
                prefix=log_prefix,
                file=benchmark_log)

        database_container = None
        try:
            # Start database container
            if test.database.lower() != "none":
                self.time_logger.mark_starting_database()
                database_container = self.docker_helper.start_database(
                    test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    return self.__exit_test(
                        success=False,
                        message=message,
                        prefix=log_prefix,
                        file=benchmark_log)
                self.time_logger.mark_started_database()

            # Start webapp
            container = test.start()
            self.time_logger.mark_test_starting()
            if container is None:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(
                    success=False,
                    message=message,
                    prefix=log_prefix,
                    file=benchmark_log)
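            # Poll the webapp for up to 60 seconds, until it accepts requests,
            # the server container dies, or the timeout expires.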
            max_time = time.time() + 60
            while True:
                accepting_requests = test.is_accepting_requests()
                if accepting_requests \
                        or time.time() >= max_time \
                        or not self.docker_helper.server_container_exists(container.id):
                    break
                time.sleep(1)
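            # Some tests request an extra fixed delay before traffic is sent;
            # honour it only when it is a positive integer.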
            if hasattr(test, 'wait_before_sending_requests') and isinstance(
                    test.wait_before_sending_requests,
                    numbers.Integral) and test.wait_before_sending_requests > 0:
                time.sleep(test.wait_before_sending_requests)
            if not accepting_requests:
                self.docker_helper.stop([container, database_container])
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                return self.__exit_test(
                    success=False,
                    message=message,
                    prefix=log_prefix,
                    file=benchmark_log)

            self.time_logger.mark_test_accepting_requests()

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs and audit
            log("Verifying framework URLs", prefix=log_prefix)
            self.time_logger.mark_verify_start()
            passed_verify = test.verify_urls()
            self.audit.audit_test_dir(test.directory)

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.time_logger.mark_benchmarking_start()
                self.__benchmark(test, benchmark_log)
                self.time_logger.log_benchmarking_end(
                    log_prefix=log_prefix, file=benchmark_log)

            # Log test timing stats
            self.time_logger.log_build_flush(benchmark_log)
            self.time_logger.log_database_start_time(log_prefix, benchmark_log)
            self.time_logger.log_test_accepting_requests(
                log_prefix, benchmark_log)
            self.time_logger.log_verify_end(log_prefix, benchmark_log)

            # Stop this test
            self.docker_helper.stop([container, database_container])

            # Save results thus far into the latest results directory
            self.results.write_intermediate(test.name,
                                            time.strftime(
                                                "%Y%m%d%H%M%S",
                                                time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                return self.__exit_test(
                    success=False,
                    message="Failed verify!",
                    prefix=log_prefix,
                    file=benchmark_log)
        except Exception as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log(tb, prefix=log_prefix, file=benchmark_log)
            return self.__exit_test(
                success=False,
                message="Error during test: %s" % test.name,
                prefix=log_prefix,
                file=benchmark_log)

        return self.__exit_test(
            success=True, prefix=log_prefix, file=benchmark_log)
    def __benchmark(self, framework_test, benchmark_log):
        '''
        Runs the benchmark for each type of test that it implements
        '''

        def benchmark_type(test_type):
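            '''
            Benchmark a single test type: ensure the raw results file exists,
            start resource-usage logging, run the load-generation container
            against the test's URL, then parse and report the results.
            '''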
  206. log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)
  207. test = framework_test.runTests[test_type]
  208. raw_file = self.results.get_raw_file(framework_test.name,
  209. test_type)
  210. if not os.path.exists(raw_file):
  211. # Open to create the empty file
  212. with open(raw_file, 'w'):
  213. pass
  214. if not test.failed:
  215. # Begin resource usage metrics collection
  216. self.__begin_logging(framework_test, test_type)
  217. script = self.config.types[test_type].get_script_name()
  218. script_variables = self.config.types[
  219. test_type].get_script_variables(
  220. test.name, "http://%s:%s%s" % (self.config.server_host,
  221. framework_test.port,
  222. test.get_url()))
  223. self.docker_helper.benchmark(script, script_variables,
  224. raw_file)
  225. # End resource usage metrics collection
  226. self.__end_logging()
  227. results = self.results.parse_test(framework_test, test_type)
  228. log("Benchmark results:", file=benchmark_log)
  229. # TODO move into log somehow
  230. pprint(results)
  231. self.results.report_benchmark_results(framework_test, test_type,
  232. results['results'])
  233. log("Complete", file=benchmark_log)
  234. for test_type in framework_test.runTests:
  235. benchmark_type(test_type)
    def __begin_logging(self, framework_test, test_type):
        '''
        Starts a dstat subprocess to monitor resource usage, to be synced
        with the client's time.
        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
        '''
        output_file = "{file_name}".format(
            file_name=self.results.get_stats_file(framework_test.name,
                                                  test_type))
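        # dstat samples system-wide resource usage (CPU, memory, disk, network,
        # etc.) and appends each sample as CSV to the per-test stats file named
        # above, so it can be read back alongside the benchmark results.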
        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
                --raw --socket --tcp --udp --unix --vm --disk-util \
                --rpc --rpcd --output {output_file}".format(
            output_file=output_file)
        cmd = shlex.split(dstat_string)
        self.subprocess_handle = subprocess.Popen(
            cmd, stdout=FNULL, stderr=subprocess.STDOUT)
    def __end_logging(self):
        '''
        Stops the dstat logging subprocess and blocks until shutdown is
        complete.
        '''
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()