benchmarker.py

from toolset.utils.output_helper import log, FNULL
from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
from toolset.utils import docker_helper

import os
import subprocess
import traceback
import socket
import time
import json
import shlex
from pprint import pprint

from colorama import Fore


class Benchmarker:
    def __init__(self, config, results):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.results = results

    ##########################################################################################
    # Public methods
    ##########################################################################################
    def run(self):
        '''
        This process involves setting up the client/server machines
        with any necessary changes, then going through each test,
        running its docker build and run, verifying the URLs, and
        running benchmarks against them.
        '''
        # Generate metadata
        self.__run_list_test_metadata()

        # Get a list of all known tests that we can run.
        all_tests = gather_remaining_tests(self.config, self.results)
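        # gather_remaining_tests presumably excludes tests that already have
        # results recorded, so an interrupted run can pick up where it left off.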
        # Run tests
        success = True
        log("Running Tests...", border='=')
        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in all_tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    success = self.__run_test(test, benchmark_log) and success
                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(all_tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()

        return success
    ##########################################################################################
    # Private methods
    ##########################################################################################
    def __run_list_test_metadata(self):
        '''
        Writes the metadata for all the available tests to
        test_metadata.json in the results directory.
        '''
        all_tests = gather_tests(benchmarker_config=self.config)
        all_tests_json = json.dumps([{
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        } for test in all_tests])

        with open(
                os.path.join(self.results.directory, "test_metadata.json"),
                "w") as f:
            f.write(all_tests_json)
    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''
        log_prefix = "%s: " % test.name

        # If the test is in the excludes list, we skip it
        if self.config.exclude is not None and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            log(message,
                prefix=log_prefix,
                file=benchmark_log)
            return False

        database_container = None
        try:
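            # A previous test may still be releasing the port; wait up to 60
            # seconds for it to free up before declaring the port unavailable.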
            if self.__is_port_bound(test.port):
                time.sleep(60)
                if self.__is_port_bound(test.port):
                    # We gave it our all
                    message = "Error: Port %s is not available, cannot start %s" % (
                        test.port, test.name)
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False
            # Start database container
            if test.database.lower() != "none":
                database_container = docker_helper.start_database(
                    self.config, test, test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False

            # Start webapp
            containers = test.start()
            if containers is None:
                docker_helper.stop(self.config, containers, database_container,
                                   test)
                message = "ERROR: Problem starting {name}".format(name=test.name)
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False
            slept = 0
            max_sleep = 60
            accepting_requests = False
            while not accepting_requests and slept < max_sleep:
                accepting_requests = test.is_accepting_requests()
                if not docker_helper.successfully_running_containers(
                        self.config, test, benchmark_log):
                    docker_helper.stop(self.config, containers,
                                       database_container, test)
                    message = "ERROR: One or more expected docker containers exited early"
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False
                time.sleep(1)
                slept += 1

            if not accepting_requests:
                docker_helper.stop(self.config, containers, database_container,
                                   test)
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False
            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
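                # Idle indefinitely; KeyboardInterrupt is not caught by the
                # except clause below, so CTRL-C propagates up and ends the run.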
                while True:
                    time.sleep(1)
            # Verify URLs
            log("Verifying framework URLs", prefix=log_prefix)
            passed_verify = test.verify_urls()

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.__benchmark(test, benchmark_log)

            # Stop this test
            docker_helper.stop(self.config, containers, database_container,
                               test)

            # Save results thus far into the latest results directory
            self.results.write_intermediate(test.name,
                                            time.strftime(
                                                "%Y%m%d%H%M%S",
                                                time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                log("Failed verify!",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log("Subprocess Error %s" % test.name,
                file=benchmark_log,
                border='-',
                color=Fore.RED)
            log(tb, prefix=log_prefix, file=benchmark_log)
            return False

        return True
    def __benchmark(self, framework_test, benchmark_log):
        '''
        Runs the benchmark for each type of test that it implements
        '''

        def benchmark_type(test_type):
            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

            test = framework_test.runTests[test_type]
            test.setup_out(benchmark_log)
            raw_file = self.results.get_raw_file(framework_test.name,
                                                 test_type)
            if not os.path.exists(raw_file):
                # Open to create the empty file
                with open(raw_file, 'w'):
                    pass

            if not test.failed:
                # Begin resource usage metrics collection
                self.__begin_logging(framework_test, test_type)

                script = self.config.types[test_type].get_script_name()
                script_variables = self.config.types[
                    test_type].get_script_variables(
                        test.name, "http://%s:%s%s" % (self.config.server_host,
                                                       framework_test.port,
                                                       test.get_url()))
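                # The load-generation script and its variables are handed off to
                # docker_helper, which (presumably inside a client-side container)
                # drives the load and writes its raw output to raw_file.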
                docker_helper.benchmark(self.config, script, script_variables,
                                        raw_file)

                # End resource usage metrics collection
                self.__end_logging()

            results = self.results.parse_test(framework_test, test_type)
            log("Benchmark results:", file=benchmark_log)
            # TODO move into log somehow
            pprint(results)

            self.results.report_benchmark_results(framework_test, test_type,
                                                  results['results'])
            log("Complete", file=benchmark_log)

        for test_type in framework_test.runTests:
            benchmark_type(test_type)
    def __begin_logging(self, framework_test, test_type):
        '''
        Starts a dstat subprocess to monitor resource usage, to be synced with
        the client's time.
        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
        '''
        output_file = "{file_name}".format(
            file_name=self.results.get_stats_file(framework_test.name,
                                                  test_type))
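        # dstat samples system counters (CPU, memory, disk, network, IPC, ...)
        # once per second and, via --output, appends them as CSV rows to the
        # stats file for this test.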
        dstat_string = ("dstat -Tafilmprs --aio --fs --ipc --lock --raw "
                        "--socket --tcp --udp --unix --vm --disk-util "
                        "--rpc --rpcd --output {output_file}").format(
                            output_file=output_file)
        cmd = shlex.split(dstat_string)
        self.subprocess_handle = subprocess.Popen(
            cmd, stdout=FNULL, stderr=subprocess.STDOUT)
    def __end_logging(self):
        '''
        Stops the dstat subprocess and blocks until shutdown is complete.
        '''
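        # terminate() sends SIGTERM; communicate() then waits for dstat to exit
        # and drains any remaining output so the child is fully reaped.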
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()
    def __is_port_bound(self, port):
        '''
        Check whether the requested port is already bound. If it is, a previous
        test probably didn't shut down properly.
        '''
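        # e.g. self.__is_port_bound(8080) returns True while some process is
        # still listening on port 8080 on a local interface.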
        port = int(port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Try to bind to all IP addresses, this port
            s.bind(("", port))
            # If we get here, we were able to bind successfully,
            # which means the port is free.
        except socket.error:
            # If we get an exception, it might be because the port is still bound
            # which would be bad, or maybe it is a privileged port (<1024) and we
            # are not running as root, or maybe the server is gone, but sockets are
            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
            # connect.
            try:
                s.connect(("127.0.0.1", port))
                # If we get here, we were able to connect to something, which means
                # that the port is still bound.
                return True
            except socket.error:
                # An exception means that we couldn't connect, so a server probably
                # isn't still running on the port.
                pass
        finally:
            s.close()

        return False