benchmarker.py

from toolset.utils.output_helper import log, FNULL
from toolset.utils.metadata_helper import gather_tests, gather_remaining_tests
from toolset.utils import docker_helper

import os
import subprocess
import traceback
import socket
import time
import json
import shlex
from pprint import pprint

from colorama import Fore


class Benchmarker:
    def __init__(self, config, results):
        '''
        Initialize the benchmarker.
        '''
        self.config = config
        self.results = results

    ##########################################################################################
    # Public methods
    ##########################################################################################

    def run(self):
        '''
        Sets up the client/server machines with any necessary changes, then
        goes through each test: building and running its docker containers,
        verifying the URLs, and running benchmarks against them.
        '''
        # Generate metadata
        self.__run_list_test_metadata()

        # Get a list of all known tests that we can run.
        all_tests = gather_remaining_tests(self.config, self.results)

        # Run tests
        log("Running Tests...", border='=')
        with open(os.path.join(self.results.directory, 'benchmark.log'),
                  'w') as benchmark_log:
            for test in all_tests:
                log("Running Test: %s" % test.name, border='-')
                with self.config.quiet_out.enable():
                    self.__run_test(test, benchmark_log)

                # Load intermediate result from child process
                self.results.load()

        # Parse results
        if self.config.mode == "benchmark":
            log("Parsing Results ...", border='=')
            self.results.parse(all_tests)

        self.results.set_completion_time()
        self.results.upload()
        self.results.finish()
    ##########################################################################################
    # Private methods
    ##########################################################################################

    def __run_list_test_metadata(self):
        '''
        Writes the metadata for all the available tests to test_metadata.json
        in the results directory.
        '''
        all_tests = gather_tests(benchmarker_config=self.config)
        all_tests_json = json.dumps([{
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        } for test in all_tests])

        with open(
                os.path.join(self.results.directory, "test_metadata.json"),
                "w") as f:
            f.write(all_tests_json)
    def __run_test(self, test, benchmark_log):
        '''
        Runs the given test, verifies that the webapp is accepting requests,
        optionally benchmarks the webapp, and ultimately stops all services
        started for this test.
        '''
        log_prefix = "%s: " % test.name

        # If the test is in the excludes list, we skip it
        if self.config.exclude is not None and test.name in self.config.exclude:
            message = "Test {name} has been added to the excludes list. Skipping.".format(
                name=test.name)
            self.results.write_intermediate(test.name, message)
            log(message, prefix=log_prefix, file=benchmark_log)
            return False

        database_container = None
        try:
            if self.__is_port_bound(test.port):
                time.sleep(60)

            if self.__is_port_bound(test.port):
                # We gave it our all
                message = "Error: Port %s is not available, cannot start %s" % (
                    test.port, test.name)
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            # Start database container
            if test.database.lower() != "none":
                database_container = docker_helper.start_database(
                    self.config, test, test.database.lower())
                if database_container is None:
                    message = "ERROR: Problem building/running database container"
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False

            # Start webapp
            containers = test.start()
            if containers is None:
                docker_helper.stop(self.config, containers, database_container,
                                   test)
                message = "ERROR: Problem starting {name}".format(
                    name=test.name)
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            slept = 0
            max_sleep = 60
            accepting_requests = False
            while not accepting_requests and slept < max_sleep:
                accepting_requests = test.is_accepting_requests()
                if not docker_helper.successfully_running_containers(
                        self.config, test, benchmark_log):
                    docker_helper.stop(self.config, containers,
                                       database_container, test)
                    message = "ERROR: One or more expected docker containers exited early"
                    self.results.write_intermediate(test.name, message)
                    log(message,
                        prefix=log_prefix,
                        file=benchmark_log,
                        color=Fore.RED)
                    return False
                time.sleep(1)
                slept += 1

            if not accepting_requests:
                docker_helper.stop(self.config, containers, database_container,
                                   test)
                message = "ERROR: Framework is not accepting requests from client machine"
                self.results.write_intermediate(test.name, message)
                log(message,
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False

            # Debug mode blocks execution here until ctrl+c
            if self.config.mode == "debug":
                log("Entering debug mode. Server has started. CTRL-c to stop.",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.YELLOW)
                while True:
                    time.sleep(1)

            # Verify URLs
            log("Verifying framework URLs", prefix=log_prefix)
            passed_verify = test.verify_urls()

            # Benchmark this test
            if self.config.mode == "benchmark":
                log("Benchmarking %s" % test.name,
                    file=benchmark_log,
                    border='-')
                self.__benchmark(test, benchmark_log)

            # Stop this test
            docker_helper.stop(self.config, containers, database_container,
                               test)

            # Save results thus far into the latest results directory
            self.results.write_intermediate(test.name,
                                            time.strftime(
                                                "%Y%m%d%H%M%S",
                                                time.localtime()))

            # Upload the results thus far to another server (optional)
            self.results.upload()

            if self.config.mode == "verify" and not passed_verify:
                log("Failed verify!",
                    prefix=log_prefix,
                    file=benchmark_log,
                    color=Fore.RED)
                return False
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            tb = traceback.format_exc()
            self.results.write_intermediate(test.name,
                                            "error during test: " + str(e))
            log("Subprocess Error %s" % test.name,
                file=benchmark_log,
                border='-',
                color=Fore.RED)
            log(tb, prefix=log_prefix, file=benchmark_log)
            return False

        return True
    def __benchmark(self, framework_test, benchmark_log):
        '''
        Runs the benchmark for each type of test that it implements
        '''

        def benchmark_type(test_type):
            log("BENCHMARKING %s ... " % test_type.upper(), file=benchmark_log)

            test = framework_test.runTests[test_type]
            test.setup_out(benchmark_log)
            raw_file = self.results.get_raw_file(framework_test.name,
                                                 test_type)
            if not os.path.exists(raw_file):
                # Open to create the empty file
                with open(raw_file, 'w'):
                    pass

            if not test.failed:
                # Begin resource usage metrics collection
                self.__begin_logging(framework_test, test_type)

                script = self.config.types[test_type].get_script_name()
                script_variables = self.config.types[
                    test_type].get_script_variables(
                        test.name, "http://%s:%s%s" % (self.config.server_host,
                                                       framework_test.port,
                                                       test.get_url()))

                docker_helper.benchmark(self.config, script, script_variables,
                                        raw_file)

                # End resource usage metrics collection
                self.__end_logging()

            results = self.results.parse_test(framework_test, test_type)
            log("Benchmark results:", file=benchmark_log)
            # TODO move into log somehow
            pprint(results)

            self.results.report_benchmark_results(framework_test, test_type,
                                                  results['results'])
            log("Complete", file=benchmark_log)

        for test_type in framework_test.runTests:
            benchmark_type(test_type)
    def __begin_logging(self, framework_test, test_type):
        '''
        Starts a dstat subprocess to monitor resource usage, to be synced with
        the client's time.
        TODO: MySQL and InnoDB are possible. Figure out how to implement them.
        '''
        output_file = self.results.get_stats_file(framework_test.name,
                                                  test_type)
        # dstat samples system-wide resource statistics and, via --output,
        # appends them as CSV to the stats file for later parsing.
        dstat_string = "dstat -Tafilmprs --aio --fs --ipc --lock --raw --socket --tcp \
                        --udp --unix --vm --disk-util \
                        --rpc --rpcd --output {output_file}".format(
            output_file=output_file)
        cmd = shlex.split(dstat_string)
        self.subprocess_handle = subprocess.Popen(
            cmd, stdout=FNULL, stderr=subprocess.STDOUT)
    def __end_logging(self):
        '''
        Stops the dstat logging subprocess and blocks until shutdown is
        complete.
        '''
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()
    def __is_port_bound(self, port):
        '''
        Check if the requested port is available. If it isn't available, then a
        previous test probably didn't shut down properly.
        '''
        port = int(port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Try to bind to this port on all IP addresses
            s.bind(("", port))
            # If we get here, we were able to bind successfully,
            # which means the port is free.
        except socket.error:
            # If we get an exception, it might be because the port is still bound
            # which would be bad, or maybe it is a privileged port (<1024) and we
            # are not running as root, or maybe the server is gone, but sockets are
            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
            # connect.
            try:
                s.connect(("127.0.0.1", port))
                # If we get here, we were able to connect to something, which means
                # that the port is still bound.
                return True
            except socket.error:
                # An exception means that we couldn't connect, so a server probably
                # isn't still running on the port.
                pass
        finally:
            s.close()
        return False
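
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): the
# toolset's entry point is expected to construct a config object and a
# results object and then drive this class. `BenchmarkConfig` and `Results`
# are assumed names for those objects; only Benchmarker(config, results) and
# .run() come from the class above.
#
#     config = BenchmarkConfig(parsed_args)   # hypothetical constructor
#     results = Results(config)               # hypothetical constructor
#     Benchmarker(config, results).run()
# ---------------------------------------------------------------------------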