# framework_test.py
  1. import importlib
  2. import os
  3. import subprocess
  4. import time
  5. import re
  6. import pprint
  7. import sys
  8. class FrameworkTest:
  9. ##########################################################################################
  10. # Class variables
  11. ##########################################################################################
  12. headers = "-H 'Host: localhost' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
  13. concurrency_template = """
  14. echo ""
  15. echo "---------------------------------------------------------"
  16. echo " Running Primer {name}"
  17. echo " {wrk} {headers} -d 60 -c 8 -t 8 http://{server_host}:{port}{url}"
  18. echo "---------------------------------------------------------"
  19. echo ""
  20. {wrk} {headers} -d 5 -c 8 -t 8 http://{server_host}:{port}{url}
  21. sleep 5
  22. echo ""
  23. echo "---------------------------------------------------------"
  24. echo " Running Warmup {name}"
  25. echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}"
  26. echo "---------------------------------------------------------"
  27. echo ""
  28. {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}
  29. sleep 5
  30. for c in {interval}
  31. do
  32. echo ""
  33. echo "---------------------------------------------------------"
  34. echo " Concurrency: $c for {name}"
  35. echo " {wrk} {headers} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) http://{server_host}:{port}{url}"
  36. echo "---------------------------------------------------------"
  37. echo ""
  38. {wrk} {headers} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
  39. sleep 2
  40. done
  41. """
  42. query_template = """
  43. echo ""
  44. echo "---------------------------------------------------------"
  45. echo " Running Primer {name}"
  46. echo " wrk {headers} -d 5 -c 8 -t 8 http://{server_host}:{port}{url}2"
  47. echo "---------------------------------------------------------"
  48. echo ""
  49. wrk {headers} -d 5 -c 8 -t 8 http://{server_host}:{port}{url}2
  50. sleep 5
  51. echo ""
  52. echo "---------------------------------------------------------"
  53. echo " Running Warmup {name}"
  54. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}2"
  55. echo "---------------------------------------------------------"
  56. echo ""
  57. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}2
  58. sleep 5
  59. for c in {interval}
  60. do
  61. echo ""
  62. echo "---------------------------------------------------------"
  63. echo " Queries: $c for {name}"
  64. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}$c"
  65. echo "---------------------------------------------------------"
  66. echo ""
  67. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} http://{server_host}:{port}{url}"$c"
  68. sleep 2
  69. done
  70. """
  71. # The sort value is the order in which we represent all the tests. (Mainly helpful for our charts to give the underlying data)
  72. # a consistent ordering even when we add or remove tests. Each test should give a sort value in it's benchmark_config file.
  73. sort = 1000
  74. os = 'linux'
  75. ##########################################################################################
  76. # Public Methods
  77. ##########################################################################################
  78. ############################################################
  79. # start(benchmarker)
  80. # Start the test using it's setup file
  81. ############################################################
  82. def start(self):
  83. return self.setup_module.start(self.benchmarker)
  84. ############################################################
  85. # End start
  86. ############################################################
  87. ############################################################
  88. # stop(benchmarker)
  89. # Stops the test using it's setup file
  90. ############################################################
  91. def stop(self):
  92. return self.setup_module.stop()
  93. ############################################################
  94. # End stop
  95. ############################################################
  96. ############################################################
  97. # verify_urls
  98. # Verifys each of the URLs for this test. THis will sinply
  99. # curl the URL and check for it's return status.
  100. # For each url, a flag will be set on this object for whether
  101. # or not it passed
  102. ############################################################
  103. def verify_urls(self):
  104. # JSON
  105. try:
  106. print "VERIFYING JSON (" + self.json_url + ") ..."
  107. url = self.benchmarker.generate_url(self.json_url, self.port)
  108. subprocess.check_call(["curl", "-f", url])
  109. print ""
  110. self.json_url_passed = True
  111. except (AttributeError, subprocess.CalledProcessError) as e:
  112. self.json_url_passed = False
  113. # DB
  114. try:
  115. print "VERIFYING DB (" + self.db_url + ") ..."
  116. url = self.benchmarker.generate_url(self.db_url, self.port)
  117. subprocess.check_call(["curl", "-f", url])
  118. print ""
  119. self.db_url_passed = True
  120. except (AttributeError, subprocess.CalledProcessError) as e:
  121. self.db_url_passed = False
  122. # Query
  123. try:
  124. print "VERIFYING Query (" + self.query_url + "2) ..."
  125. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  126. subprocess.check_call(["curl", "-f", url])
  127. print ""
  128. self.query_url_passed = True
  129. except (AttributeError, subprocess.CalledProcessError) as e:
  130. self.query_url_passed = False
  131. # Fortune
  132. try:
  133. print "VERIFYING Fortune (" + self.fortune_url + ") ..."
  134. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  135. subprocess.check_call(["curl", "-f", url])
  136. print ""
  137. self.fortune_url_passed = True
  138. except (AttributeError, subprocess.CalledProcessError) as e:
  139. self.fortune_url_passed = False
  140. # Update
  141. try:
  142. print "VERIFYING Update (" + self.update_url + "2) ..."
  143. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  144. subprocess.check_call(["curl", "-f", url])
  145. print ""
  146. self.update_url_passed = True
  147. except (AttributeError, subprocess.CalledProcessError) as e:
  148. self.update_url_passed = False
  149. # plaintext
  150. try:
  151. print "VERIFYING Plaintext (" + self.plaintext_url + ") ..."
  152. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  153. subprocess.check_call(["curl", "-f", url])
  154. print ""
  155. self.plaintext_url_passed = True
  156. except (AttributeError, subprocess.CalledProcessError) as e:
  157. self.plaintext_url_passed = False
  158. ############################################################
  159. # End verify_urls
  160. ############################################################
  161. ############################################################
  162. # contains_type(type)
  163. # true if this test contains an implementation of the given
  164. # test type (json, db, etc.)
  165. ############################################################
  166. def contains_type(self, type):
  167. try:
  168. if type == 'json' and self.json_url != None:
  169. return True
  170. if type == 'db' and self.db_url != None:
  171. return True
  172. if type == 'query' and self.query_url != None:
  173. return True
  174. if type == 'fortune' and self.fortune_url != None:
  175. return True
  176. if type == 'update' and self.update_url != None:
  177. return True
  178. if type == 'plaintext' and self.plaintext_url != None:
  179. return True
  180. except AttributeError:
  181. pass
  182. return False
  183. ############################################################
  184. # End stop
  185. ############################################################
  186. ############################################################
  187. # benchmark
  188. # Runs the benchmark for each type of test that it implements
  189. # JSON/DB/Query.
  190. ############################################################
  191. def benchmark(self):
  192. # JSON
  193. try:
  194. if self.json_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "json"):
  195. sys.stdout.write("BENCHMARKING JSON ... ")
  196. sys.stdout.flush()
  197. remote_script = self.__generate_concurrency_script(self.json_url, self.port)
  198. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'))
  199. results = self.__parse_test('json')
  200. self.benchmarker.report_results(framework=self, test="json", results=results['results'])
  201. print "Complete"
  202. except AttributeError:
  203. pass
  204. # DB
  205. try:
  206. if self.db_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "db"):
  207. sys.stdout.write("BENCHMARKING DB ... ")
  208. sys.stdout.flush()
  209. remote_script = self.__generate_concurrency_script(self.db_url, self.port)
  210. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'))
  211. results = self.__parse_test('db')
  212. self.benchmarker.report_results(framework=self, test="db", results=results['results'])
  213. print "Complete"
  214. except AttributeError:
  215. pass
  216. # Query
  217. try:
  218. if self.query_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "query"):
  219. sys.stdout.write("BENCHMARKING Query ... ")
  220. sys.stdout.flush()
  221. remote_script = self.__generate_query_script(self.query_url, self.port)
  222. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'))
  223. results = self.__parse_test('query')
  224. self.benchmarker.report_results(framework=self, test="query", results=results['results'])
  225. print "Complete"
  226. except AttributeError:
  227. pass
  228. # fortune
  229. try:
  230. if self.fortune_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "fortune"):
  231. sys.stdout.write("BENCHMARKING Fortune ... ")
  232. sys.stdout.flush()
  233. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port)
  234. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'fortune'))
  235. results = self.__parse_test('fortune')
  236. self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
  237. print "Complete"
  238. except AttributeError:
  239. pass
  240. # update
  241. try:
  242. if self.update_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "update"):
  243. sys.stdout.write("BENCHMARKING Update ... ")
  244. sys.stdout.flush()
  245. remote_script = self.__generate_query_script(self.update_url, self.port)
  246. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'update'))
  247. results = self.__parse_test('update')
  248. self.benchmarker.report_results(framework=self, test="update", results=results['results'])
  249. print "Complete"
  250. except AttributeError:
  251. pass
  252. # plaintext
  253. try:
  254. if self.plaintext_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "plaintext"):
  255. sys.stdout.write("BENCHMARKING Plaintext ... ")
  256. sys.stdout.flush()
  257. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384])
  258. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'plaintext'))
  259. results = self.__parse_test('plaintext')
  260. self.benchmarker.report_results(framework=self, test="plaintext", results=results['results'])
  261. print "Complete"
  262. except AttributeError:
  263. pass
  264. ############################################################
  265. # End benchmark
  266. ############################################################
  267. ############################################################
  268. # parse_all
  269. # Method meant to be run for a given timestamp
  270. ############################################################
  271. def parse_all(self):
  272. # JSON
  273. if os.path.exists(self.benchmarker.output_file(self.name, 'json')):
  274. results = self.__parse_test('json')
  275. self.benchmarker.report_results(framework=self, test="json", results=results['results'])
  276. # DB
  277. if os.path.exists(self.benchmarker.output_file(self.name, 'db')):
  278. results = self.__parse_test('db')
  279. self.benchmarker.report_results(framework=self, test="db", results=results['results'])
  280. # Query
  281. if os.path.exists(self.benchmarker.output_file(self.name, 'query')):
  282. results = self.__parse_test('query')
  283. self.benchmarker.report_results(framework=self, test="query", results=results['results'])
  284. # Fortune
  285. if os.path.exists(self.benchmarker.output_file(self.name, 'fortune')):
  286. results = self.__parse_test('fortune')
  287. self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
  288. # Update
  289. if os.path.exists(self.benchmarker.output_file(self.name, 'update')):
  290. results = self.__parse_test('update')
  291. self.benchmarker.report_results(framework=self, test="update", results=results['results'])
  292. # Plaintext
  293. if os.path.exists(self.benchmarker.output_file(self.name, 'plaintext')):
  294. results = self.__parse_test('plaintext')
  295. self.benchmarker.report_results(framework=self, test="plaintext", results=results['results'])
  296. ############################################################
  297. # End parse_all
  298. ############################################################
  299. ############################################################
  300. # __parse_test(test_type)
  301. ############################################################
  302. def __parse_test(self, test_type):
  303. try:
  304. results = dict()
  305. results['results'] = []
  306. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  307. is_warmup = True
  308. rawData = None
  309. for line in raw_data:
  310. if "Queries:" in line or "Concurrency:" in line:
  311. is_warmup = False
  312. rawData = None
  313. continue
  314. if "Warmup" in line or "Primer" in line:
  315. is_warmup = True
  316. continue
  317. if not is_warmup:
  318. if rawData == None:
  319. rawData = dict()
  320. results['results'].append(rawData)
  321. #if "Requests/sec:" in line:
  322. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  323. # rawData['reportedResults'] = m.group(1)
  324. # search for weighttp data such as succeeded and failed.
  325. if "Latency" in line:
  326. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  327. if len(m) == 4:
  328. rawData['latencyAvg'] = m[0]
  329. rawData['latencyStdev'] = m[1]
  330. rawData['latencyMax'] = m[2]
  331. # rawData['latencyStdevPercent'] = m[3]
  332. #if "Req/Sec" in line:
  333. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  334. # if len(m) == 4:
  335. # rawData['requestsAvg'] = m[0]
  336. # rawData['requestsStdev'] = m[1]
  337. # rawData['requestsMax'] = m[2]
  338. # rawData['requestsStdevPercent'] = m[3]
  339. #if "requests in" in line:
  340. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  341. # if m != None:
  342. # # parse out the raw time, which may be in minutes or seconds
  343. # raw_time = m.group(1)
  344. # if "ms" in raw_time:
  345. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  346. # elif "s" in raw_time:
  347. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  348. # elif "m" in raw_time:
  349. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  350. # elif "h" in raw_time:
  351. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  352. if "requests in" in line:
  353. m = re.search("([0-9]+) requests in", line)
  354. if m != None:
  355. rawData['totalRequests'] = int(m.group(1))
  356. if "Socket errors" in line:
  357. if "connect" in line:
  358. m = re.search("connect ([0-9]+)", line)
  359. rawData['connect'] = int(m.group(1))
  360. if "read" in line:
  361. m = re.search("read ([0-9]+)", line)
  362. rawData['read'] = int(m.group(1))
  363. if "write" in line:
  364. m = re.search("write ([0-9]+)", line)
  365. rawData['write'] = int(m.group(1))
  366. if "timeout" in line:
  367. m = re.search("timeout ([0-9]+)", line)
  368. rawData['timeout'] = int(m.group(1))
  369. if "Non-2xx" in line:
  370. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  371. if m != None:
  372. rawData['5xx'] = int(m.group(1))
  373. return results
  374. except IOError:
  375. return None
  376. ############################################################
  377. # End benchmark
  378. ############################################################
  379. ##########################################################################################
  380. # Private Methods
  381. ##########################################################################################
  382. ############################################################
  383. # __run_benchmark(script, output_file)
  384. # Runs a single benchmark using the script which is a bash
  385. # template that uses weighttp to run the test. All the results
  386. # outputed to the output_file.
  387. ############################################################
  388. def __run_benchmark(self, script, output_file):
  389. with open(output_file, 'w') as raw_file:
  390. p = subprocess.Popen(self.benchmarker.ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=raw_file)
  391. p.communicate(script)
  392. ############################################################
  393. # End __run_benchmark
  394. ############################################################
  395. ############################################################
  396. # __generate_concurrency_script(url, port)
  397. # Generates the string containing the bash script that will
  398. # be run on the client to benchmark a single test. This
  399. # specifically works for the variable concurrency tests (JSON
  400. # and DB)
  401. ############################################################
  402. def __generate_concurrency_script(self, url, port, wrk_command="wrk", intervals=[]):
  403. if len(intervals) == 0:
  404. intervals = self.benchmarker.concurrency_levels
  405. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  406. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  407. interval=" ".join("{}".format(item) for item in intervals),
  408. server_host=self.benchmarker.server_host, port=port, url=url, headers=self.headers, wrk=wrk_command)
  409. ############################################################
  410. # End __generate_concurrency_script
  411. ############################################################
  412. ############################################################
  413. # __generate_query_script(url, port)
  414. # Generates the string containing the bash script that will
  415. # be run on the client to benchmark a single test. This
  416. # specifically works for the variable query tests (Query)
  417. ############################################################
  418. def __generate_query_script(self, url, port):
  419. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  420. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  421. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  422. server_host=self.benchmarker.server_host, port=port, url=url, headers=self.headers)
  423. ############################################################
  424. # End __generate_query_script
  425. ############################################################
  426. ##########################################################################################
  427. # Constructor
  428. ##########################################################################################
  429. def __init__(self, name, directory, benchmarker, args):
  430. self.name = name
  431. self.directory = directory
  432. self.benchmarker = benchmarker
  433. self.__dict__.update(args)
  434. # ensure diretory has __init__.py file so that we can use it as a pythong package
  435. if not os.path.exists(os.path.join(directory, "__init__.py")):
  436. open(os.path.join(directory, "__init__.py"), 'w').close()
  437. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  438. ############################################################
  439. # End __init__
  440. ############################################################
  441. ############################################################
  442. # End FrameworkTest
  443. ############################################################
  444. ##########################################################################################
  445. # Static methods
  446. ##########################################################################################
  447. ##############################################################
  448. # parse_config(config, directory, benchmarker)
  449. # parses a config file and returns a list of FrameworkTest
  450. # objects based on that config file.
  451. ##############################################################
  452. def parse_config(config, directory, benchmarker):
  453. tests = []
  454. # The config object can specify multiple tests, we neep to loop
  455. # over them and parse them out
  456. for test in config['tests']:
  457. for key, value in test.iteritems():
  458. test_name = config['framework']
  459. # if the test uses the 'defualt' keywork, then we don't
  460. # append anything to it's name. All configs should only have 1 default
  461. if key != 'default':
  462. # we need to use the key in the test_name
  463. test_name = test_name + "-" + key
  464. tests.append(FrameworkTest(test_name, directory, benchmarker, value))
  465. return tests
  466. ##############################################################
  467. # End parse_config
  468. ##############################################################