framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json
import textwrap


class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      sleep 2
    done
  """

  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
  """
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"

  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates that jsonString is a JSON object whose 'message'
  # key has the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      if not obj:
        return False
      if not obj["message"]:
        return False
      if not obj["message"].lower() == "hello, world!":
        return False
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validateJson raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False
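  # Example (illustrative, not from the original source): a response body of
  #   '{"message": "Hello, World!"}'
  # passes validateJson, while '{"message": "Goodbye, World!"}' or a body
  # without a "message" key fails.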
  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      if not obj:
        return False
      if type(obj) != dict:
        return False
      if not obj["id"] or type(obj["id"]) != int:
        return False
      if not obj["randomNumber"] or type(obj["randomNumber"]) != int:
        return False
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validateDb raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
        return False
      if not arr[0]["id"] or type(arr[0]["id"]) != int:
        return False
      if not arr[0]["randomNumber"] or type(arr[0]["randomNumber"]) != int:
        return False
      if not arr[1]["id"] or type(arr[1]["id"]) != int:
        return False
      if not arr[1]["randomNumber"] or type(arr[1]["randomNumber"]) != int:
        return False
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validateQuery raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False
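  # Illustrative example (values are hypothetical): a body such as
  #   '[{"id": 4174, "randomNumber": 331}, {"id": 51, "randomNumber": 6227}]'
  # passes validateQuery; an array of any other length, or entries missing
  # either key, fails.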
  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune return.
  ############################################################
  def validateFortune(self, htmlString, out, err):
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      return parser.isValidFortune()
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validateFortune raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
        return False
      if not arr[0]["id"] or type(arr[0]["id"]) != int:
        return False
      if not arr[0]["randomNumber"] or type(arr[0]["randomNumber"]) != int:
        return False
      if not arr[1]["id"] or type(arr[1]["id"]) != int:
        return False
      if not arr[1]["randomNumber"] or type(arr[1]["randomNumber"]) != int:
        return False
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validateUpdate raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False
  ############################################################
  # Validates the response is the plain text "hello, world!"
  # (case-insensitive, surrounding whitespace ignored).
  ############################################################
  def validatePlaintext(self, jsonString, out, err):
    try:
      return jsonString.lower().strip() == "hello, world!"
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
        Error: validatePlaintext raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
      return False
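  # Illustrative: "Hello, World!", "hello, world!" and "Hello, World!\n" all
  # pass validatePlaintext; any other body fails.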
  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################

  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL, a
  # flag is set on this object indicating whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.json_url, self.port)
        output = self.__curl_url(url, self.JSON, out, err)
        if self.validateJson(output, out, err):
          self.json_url_passed = True
        else:
          self.json_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.json_url_passed = False
      out.write("VALIDATING JSON ... ")
      if self.json_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # DB
    if self.runTests[self.DB]:
      out.write("VERIFYING DB (" + self.db_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.db_url, self.port)
        output = self.__curl_url(url, self.DB, out, err)
        if self.validateDb(output, out, err):
          self.db_url_passed = True
        else:
          self.db_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.db_url_passed = False
      out.write("VALIDATING DB ... ")
      if self.db_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Query
    if self.runTests[self.QUERY]:
      out.write("VERIFYING QUERY (" + self.query_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.query_url + "2", self.port)
        output = self.__curl_url(url, self.QUERY, out, err)
        if self.validateQuery(output, out, err):
          self.query_url_passed = True
        else:
          self.query_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.query_url_passed = False
      out.write("VALIDATING QUERY ... ")
      if self.query_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Fortune
    if self.runTests[self.FORTUNE]:
      out.write("VERIFYING FORTUNE (" + self.fortune_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.fortune_url, self.port)
        output = self.__curl_url(url, self.FORTUNE, out, err)
        if self.validateFortune(output, out, err):
          self.fortune_url_passed = True
        else:
          self.fortune_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.fortune_url_passed = False
      out.write("VALIDATING FORTUNE ... ")
      if self.fortune_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Update
    if self.runTests[self.UPDATE]:
      out.write("VERIFYING UPDATE (" + self.update_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.update_url + "2", self.port)
        output = self.__curl_url(url, self.UPDATE, out, err)
        if self.validateUpdate(output, out, err):
          self.update_url_passed = True
        else:
          self.update_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.update_url_passed = False
      out.write("VALIDATING UPDATE ... ")
      if self.update_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      out.write("VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.plaintext_url, self.port)
        output = self.__curl_url(url, self.PLAINTEXT, out, err)
        if self.validatePlaintext(output, out, err):
          self.plaintext_url_passed = True
        else:
          self.plaintext_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.plaintext_url_passed = False
      out.write("VALIDATING PLAINTEXT ... ")
      if self.plaintext_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # contains_type(type)
  # Returns True if this test contains an implementation of
  # the given test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url != None:
        return True
      if type == self.DB and self.db_url != None:
        return True
      if type == self.QUERY and self.query_url != None:
        return True
      if type == self.FORTUNE and self.fortune_url != None:
        return True
      if type == self.UPDATE and self.update_url != None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url != None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that this
  # framework implements (JSON, DB, Query, Fortune, Update,
  # Plaintext).
  ############################################################
  def benchmark(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
          out.write("BENCHMARKING JSON ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.JSON)
          if self.json_url_passed:
            remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.JSON)
          self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.DB:
          out.write("BENCHMARKING DB ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.DB)
          if self.db_url_passed:
            remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.DB)
          self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
          out.write("Complete\n")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY:
          out.write("BENCHMARKING Query ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.QUERY)
          if self.query_url_passed:
            remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.QUERY)
          self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE:
          out.write("BENCHMARKING Fortune ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
          if self.fortune_url_passed:
            remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.FORTUNE)
          self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE:
          out.write("BENCHMARKING Update ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.UPDATE)
          if self.update_url_passed:
            remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.UPDATE)
          self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT:
          out.write("BENCHMARKING Plaintext ... ")
          out.flush()
          results = None
          output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
          if self.plaintext_url_passed:
            remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384], pipeline="--pipeline 16")
            self.__run_benchmark(remote_script, output_file, err)
          results = self.__parse_test(self.PLAINTEXT)
          self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        traceback.print_exc()
        pass
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Parses the raw output file for each test type that has one
  # and reports the results. Meant to be run for a given
  # timestamp.
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue
            if not is_warmup:
              if rawData == None:
                rawData = dict()
                results['results'].append(rawData)
              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)
              # search for wrk output such as latency and socket errors
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]
              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]
              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m != None:
                  rawData['totalRequests'] = int(m.group(1))
              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))
              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m != None:
                  rawData['5xx'] = int(m.group(1))
      return results
    except IOError:
      return None
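  # Illustrative shape of the dict this method returns (all values are
  # hypothetical): one entry per concurrency/query level, e.g.
  #   {'results': [{'latencyAvg': '1.21ms', 'latencyStdev': '0.85ms',
  #                 'latencyMax': '18.2ms', 'totalRequests': 123456,
  #                 'timeout': 0, '5xx': 0}, ...]}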
  ############################################################
  # End __parse_test
  ############################################################

  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the given script, which is a
  # rendered bash template that uses wrk to run the test. All
  # results are written to output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
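  # For context (an assumption about the surrounding toolset, not stated in this
  # file): benchmarker.client_ssh_string is expected to be something like
  # "ssh -T user@client-host", so the generated bash script runs on the
  # load-generation client rather than on the server under test.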
  ############################################################
  # End __run_benchmark
  ############################################################

  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################

  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
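  # For example (illustrative), __get_request_headers(self.accept_json) renders
  # roughly:
  #   -H 'Host: localhost' -H 'Accept: application/json,...' -H 'Connection: keep-alive'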
  ############################################################
  # End __get_request_headers
  ############################################################
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # Use -i to output the response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use --stderr - to redirect stderr to stdout so we get
    # error output for sure in stdout.
    # Use -sS to hide the progress bar, but still show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    # We need to get the response body from curl and return it.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.__dict__.update(args)

    # ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()

    self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################

############################################################
# End FrameworkTest
############################################################
##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests, so we need to
  # loop over them and parse each one out.
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']

      runTests = dict()
      runTests["json"] = True if value.get("json_url", False) else False
      runTests["db"] = True if value.get("db_url", False) else False
      runTests["query"] = True if value.get("query_url", False) else False
      runTests["fortune"] = True if value.get("fortune_url", False) else False
      runTests["update"] = True if value.get("update_url", False) else False
      runTests["plaintext"] = True if value.get("plaintext_url", False) else False

      # If the test uses the 'default' keyword, we don't append
      # anything to its name. Each config should have only one default.
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))

  return tests
##############################################################
# End parse_config
##############################################################
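# Illustrative only: a minimal config fragment that parse_config can consume
# might look like the following (the framework name, URLs and port are
# hypothetical, but the keys match what the code above reads):
#
#   {
#     "framework": "example",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "plaintext_url": "/plaintext",
#         "port": 8080
#       },
#       "raw": {
#         "setup_file": "setup_raw",
#         "db_url": "/db",
#         "query_url": "/queries?queries="
#       }
#     }]
#   }
#
# This would yield two FrameworkTest objects named "example" and "example-raw".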