framework_test.py
import importlib
import os
import subprocess
import time
import re
import pprint
import sys


class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  concurrency_template = """
  mysqladmin flush-hosts -uroot -psecret

  echo ""
  echo "---------------------------------------------------------"
  echo " Running Primer {name}"
  echo " wrk -d 5 -c 8 -t 8 http://{server_host}:{port}{url}"
  echo "---------------------------------------------------------"
  echo ""
  wrk -d 5 -c 8 -t 8 http://{server_host}:{port}{url}
  sleep 5

  echo ""
  echo "---------------------------------------------------------"
  echo " Running Warmup {name}"
  echo " wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}"
  echo "---------------------------------------------------------"
  echo ""
  wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}
  sleep 5

  for c in {interval}
  do
    echo ""
    echo "---------------------------------------------------------"
    echo " Concurrency: $c for {name}"
    echo " wrk -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) http://{server_host}:{port}{url}"
    echo "---------------------------------------------------------"
    echo ""
    wrk -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
    sleep 2
  done
  """
  query_template = """
  mysqladmin flush-hosts -uroot -psecret

  echo ""
  echo "---------------------------------------------------------"
  echo " Running Primer {name}"
  echo " wrk -d 5 -c 8 -t 8 http://{server_host}:{port}{url}2"
  echo "---------------------------------------------------------"
  echo ""
  wrk -d 5 -c 8 -t 8 http://{server_host}:{port}{url}2
  sleep 5

  echo ""
  echo "---------------------------------------------------------"
  echo " Running Warmup {name}"
  echo " wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}2"
  echo "---------------------------------------------------------"
  echo ""
  wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}2
  sleep 5

  for c in {interval}
  do
    echo ""
    echo "---------------------------------------------------------"
    echo " Queries: $c for {name}"
    echo " wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}$c"
    echo "---------------------------------------------------------"
    echo ""
    wrk -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}"$c"
    sleep 2
  done
  """
  # The sort value is the order in which we represent all the tests. (Mainly
  # helpful for our charts, to give the underlying data a consistent ordering
  # even when we add or remove tests.) Each test should set a sort value in
  # its benchmark_config file.
  sort = 1000
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self):
    return self.setup_module.start(self.benchmarker)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self):
    return self.setup_module.stop()
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL, a
  # flag is set on this object recording whether it passed.
  ############################################################
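  # Note on the except clauses below: a test that does not implement a URL
  # simply omits the attribute, so referencing e.g. self.json_url raises
  # AttributeError; `curl -f` exits non-zero on an HTTP error, raising
  # CalledProcessError. Either way the corresponding flag is set to False.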
  def verify_urls(self):
    # JSON
    try:
      print "VERIFYING JSON (" + self.json_url + ") ..."
      url = self.benchmarker.generate_url(self.json_url, self.port)
      subprocess.check_call(["curl", "-f", url])
      print ""
      self.json_url_passed = True
    except (AttributeError, subprocess.CalledProcessError) as e:
      self.json_url_passed = False

    # DB
    try:
      print "VERIFYING DB (" + self.db_url + ") ..."
      url = self.benchmarker.generate_url(self.db_url, self.port)
      subprocess.check_call(["curl", "-f", url])
      print ""
      self.db_url_passed = True
    except (AttributeError, subprocess.CalledProcessError) as e:
      self.db_url_passed = False

    # Query
    try:
      print "VERIFYING Query (" + self.query_url + "2) ..."
      url = self.benchmarker.generate_url(self.query_url + "2", self.port)
      subprocess.check_call(["curl", "-f", url])
      print ""
      self.query_url_passed = True
    except (AttributeError, subprocess.CalledProcessError) as e:
      self.query_url_passed = False

    # Fortune
    try:
      print "VERIFYING Fortune (" + self.fortune_url + ") ..."
      url = self.benchmarker.generate_url(self.fortune_url, self.port)
      subprocess.check_call(["curl", "-f", url])
      print ""
      self.fortune_url_passed = True
    except (AttributeError, subprocess.CalledProcessError) as e:
      self.fortune_url_passed = False
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it
  # implements (JSON, DB, Query, and Fortune).
  ############################################################
  def benchmark(self):
    # JSON
    try:
      if self.json_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "json"):
        sys.stdout.write("BENCHMARKING JSON ... ")
        remote_script = self.__generate_concurrency_script(self.json_url, self.port)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
        results = self.__parse_test('json')
        self.benchmarker.report_results(framework=self, test="json",
          requests=results['requests'], latency=results['latency'],
          results=results['results'], total_time=results['total_time'],
          errors=results['errors'], total_requests=results['totalRequests'])
        print "Complete"
    except AttributeError:
      pass

    # DB
    try:
      if self.db_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "db"):
        sys.stdout.write("BENCHMARKING DB ... ")
        remote_script = self.__generate_concurrency_script(self.db_url, self.port)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
        results = self.__parse_test('db')
        self.benchmarker.report_results(framework=self, test="db",
          requests=results['requests'], latency=results['latency'],
          results=results['results'], total_time=results['total_time'],
          errors=results['errors'], total_requests=results['totalRequests'])
        print "Complete"
    except AttributeError:
      pass

    # Query
    try:
      if self.query_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "query"):
        sys.stdout.write("BENCHMARKING Query ... ")
        remote_script = self.__generate_query_script(self.query_url, self.port)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
        results = self.__parse_test('query')
        self.benchmarker.report_results(framework=self, test="query",
          requests=results['requests'], latency=results['latency'],
          results=results['results'], total_time=results['total_time'],
          errors=results['errors'], total_requests=results['totalRequests'])
        print "Complete"
    except AttributeError:
      pass

    # Fortune
    try:
      if self.fortune_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "fortune"):
        sys.stdout.write("BENCHMARKING Fortune ... ")
        remote_script = self.__generate_concurrency_script(self.fortune_url, self.port)
        self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'fortune'))
        results = self.__parse_test('fortune')
        self.benchmarker.report_results(framework=self, test="fortune",
          requests=results['requests'], latency=results['latency'],
          results=results['results'], total_time=results['total_time'],
          errors=results['errors'], total_requests=results['totalRequests'])
        print "Complete"
    except AttributeError:
      pass
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Parses the raw output files for every test type from a
  # given run (identified by its timestamp).
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
      results = self.__parse_test('json')
      self.benchmarker.report_results(framework=self, test="json",
        requests=results['requests'], latency=results['latency'],
        results=results['results'], total_time=results['total_time'],
        errors=results['errors'], total_requests=results['totalRequests'])

    # DB
    if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
      results = self.__parse_test('db')
      self.benchmarker.report_results(framework=self, test="db",
        requests=results['requests'], latency=results['latency'],
        results=results['results'], total_time=results['total_time'],
        errors=results['errors'], total_requests=results['totalRequests'])

    # Query
    if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
      results = self.__parse_test('query')
      self.benchmarker.report_results(framework=self, test="query",
        requests=results['requests'], latency=results['latency'],
        results=results['results'], total_time=results['total_time'],
        errors=results['errors'], total_requests=results['totalRequests'])

    # Fortune
    if os.path.exists(self.benchmarker.output_file(self.name, 'fortune')):
      results = self.__parse_test('fortune')
      self.benchmarker.report_results(framework=self, test="fortune",
        requests=results['requests'], latency=results['latency'],
        results=results['results'], total_time=results['total_time'],
        errors=results['errors'], total_requests=results['totalRequests'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
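  # For reference, the wrk summary block that this parser consumes looks
  # roughly like the following (values are illustrative only):
  #
  #   Thread Stats   Avg      Stdev     Max   +/- Stdev
  #     Latency     1.01ms  432.12us   5.50ms   85.42%
  #     Req/Sec     4.53k     1.20k    7.78k    79.30%
  #   271804 requests in 60.00s, 40.11MB read
  #   Requests/sec:   4530.07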
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      results['total_time'] = 0
      results['totalRequests'] = 0
      results['latency'] = dict()
      results['latency']['avg'] = 0
      results['latency']['stdev'] = 0
      results['latency']['max'] = 0
      results['latency']['stdevPercent'] = 0
      results['requests'] = dict()
      results['requests']['avg'] = 0
      results['requests']['stdev'] = 0
      results['requests']['max'] = 0
      results['requests']['stdevPercent'] = 0
      results['errors'] = dict()
      results['errors']['connect'] = 0
      results['errors']['read'] = 0
      results['errors']['write'] = 0
      results['errors']['timeout'] = 0
      results['errors']['5xx'] = 0
      with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
        # skip everything between a Warmup/Primer header and the next
        # measured run's Queries:/Concurrency: header
        is_warmup = False
        for line in raw_data:
          if "Queries:" in line or "Concurrency:" in line:
            is_warmup = False
            continue
          if "Warmup" in line or "Primer" in line:
            is_warmup = True
            continue
          if not is_warmup:
            if "Requests/sec:" in line:
              m = re.search(r"Requests/sec:\s+([0-9]+(?:\.[0-9]+)?)", line)
              results['results'].append(m.group(1))
            # parse the latency summary line (avg, stdev, max, +/- stdev)
            if "Latency" in line:
              m = re.findall(r"([0-9]+\.?[0-9]*(?:us|ms|s|m|%))", line)
              if len(m) == 4:
                results['latency']['avg'] = m[0]
                results['latency']['stdev'] = m[1]
                results['latency']['max'] = m[2]
                results['latency']['stdevPercent'] = m[3]
            # parse the per-thread requests summary line
            if "Req/Sec" in line:
              m = re.findall(r"([0-9]+\.?[0-9]*[k%]?)", line)
              if len(m) == 4:
                results['requests']['avg'] = m[0]
                results['requests']['stdev'] = m[1]
                results['requests']['max'] = m[2]
                results['requests']['stdevPercent'] = m[3]
            if "requests in" in line:
              m = re.search(r"requests in ([0-9]+\.?[0-9]*(?:ms|s|m|h))", line)
              if m is not None:
                # parse out the raw time, which may be in milliseconds,
                # seconds, minutes, or hours; normalize to seconds
                raw_time = m.group(1)
                if "ms" in raw_time:
                  results['total_time'] += float(raw_time[:-2]) / 1000.0
                elif "s" in raw_time:
                  results['total_time'] += float(raw_time[:-1])
                elif "m" in raw_time:
                  results['total_time'] += float(raw_time[:-1]) * 60.0
                elif "h" in raw_time:
                  results['total_time'] += float(raw_time[:-1]) * 3600.0
              m = re.search(r"([0-9]+) requests in", line)
              if m is not None:
                results['totalRequests'] += int(m.group(1))
            if "Socket errors" in line:
              if "connect" in line:
                m = re.search(r"connect ([0-9]+)", line)
                results['errors']['connect'] += int(m.group(1))
              if "read" in line:
                m = re.search(r"read ([0-9]+)", line)
                results['errors']['read'] += int(m.group(1))
              if "write" in line:
                m = re.search(r"write ([0-9]+)", line)
                results['errors']['write'] += int(m.group(1))
              if "timeout" in line:
                m = re.search(r"timeout ([0-9]+)", line)
                results['errors']['timeout'] += int(m.group(1))
            if "Non-2xx" in line:
              m = re.search(r"Non-2xx or 3xx responses: ([0-9]+)", line)
              if m is not None:
                results['errors']['5xx'] += int(m.group(1))
      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the given script, a bash
  # script generated from one of the templates above that
  # uses wrk to run the test. All output is written to
  # output_file.
  ############################################################
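  # The generated bash script is fed to the remote client's shell over stdin;
  # benchmarker.ssh_string is assumed to be a plain ssh invocation such as
  # "ssh -T client-host" (illustrative only; the real value comes from the
  # benchmarker's configuration).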
  def __run_benchmark(self, script, output_file):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.ssh_string.split(" "),
                           stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
      p.communicate(script)
  ############################################################
  # End __run_benchmark
  ############################################################
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable-concurrency tests
  # (JSON, DB, and Fortune).
  ############################################################
  def __generate_concurrency_script(self, url, port):
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
      server_host=self.benchmarker.server_host, port=port, url=url)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable-query tests (Query).
  ############################################################
  def __generate_query_script(self, url, port):
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url)
  ############################################################
  # End __generate_query_script
  ############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.__dict__.update(args)

    # ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()
    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################


##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
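# For illustration, the config dict parsed here is expected to look roughly
# like this (field names beyond 'framework', 'tests', and 'default' vary by
# framework and are shown only as an example):
#
#   {
#     "framework": "example",
#     "tests": [{
#       "default": {"setup_file": "setup", "json_url": "/json", "port": 8080, "sort": 32}
#     }]
#   }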
def parse_config(config, directory, benchmarker):
  tests = []
  # The config object can specify multiple tests; we need to loop
  # over them and parse them out
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']
      # if the test uses the 'default' keyword, then we don't append
      # anything to its name. All configs should have only one default
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key
      tests.append(FrameworkTest(test_name, directory, benchmarker, value))
  return tests
##############################################################
# End parse_config
##############################################################
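# A minimal driver sketch (illustrative only; the real entry point lives in
# the benchmarker, and the config is read from each test's benchmark_config):
#
#   import json
#   config = json.load(open(os.path.join(directory, "benchmark_config")))
#   for test in parse_config(config, directory, benchmarker):
#       test.start()
#       test.verify_urls()
#       test.benchmark()
#       test.stop()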