framework_test.py 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869
import importlib
import json
import os
import pprint
import re
import subprocess
import sys
import textwrap
import time
import traceback

from benchmark.fortune_html_parser import FortuneHTMLParser
class FrameworkTest:
    """
    Represents one framework's benchmark implementation: its metadata,
    the wrk/bash script templates used to drive load, and the methods
    (below) that verify URLs, run benchmarks, and parse results.
    """
    ##########################################################################################
    # Class variables
    ##########################################################################################
    # wrk header argument templates; {accept} is filled per test type.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Variant with browser-like headers (User-Agent, Cookie, etc.).
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    # Accept headers for each response flavor under test.
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Bash script template for variable-concurrency tests (JSON, DB,
    # Fortune, Plaintext): a short primer run, a warmup run, then a
    # sweep over the concurrency levels in {interval}.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
sleep 2
done
"""

    # Bash script template for variable-query tests (Query, Update):
    # primer and warmup hit {url}2, then a sweep appending each query
    # count in {interval} to {url}.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
sleep 2
done
"""

    # Framework metadata — presumably populated from the benchmark
    # configuration; None until assigned (TODO confirm against loader).
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    os = None
    database_os = None
    display_name = None
    notes = None
    versus = None

    ############################################################
    # Test Variables
    ############################################################
    # Canonical test-type identifiers, used as keys into runTests
    # and as the `test_type` argument throughout this class.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"
  100. ##########################################################################################
  101. # Public Methods
  102. ##########################################################################################
  103. ############################################################
  104. # Validates the jsonString is a JSON object with a 'message'
  105. # key with the value "hello, world!" (case-insensitive).
  106. ############################################################
  107. def validateJson(self, jsonString, out, err):
  108. try:
  109. obj = json.loads(jsonString)
  110. if not obj:
  111. return False
  112. if not obj["message"]:
  113. return False
  114. if not obj["message"].lower() == "hello, world!":
  115. return False
  116. return True
  117. except:
  118. err.write(textwrap.dedent("""
  119. -----------------------------------------------------
  120. Error: validateJson raised exception
  121. -----------------------------------------------------
  122. {trace}
  123. """.format( trace=sys.exc_info()[:2])))
  124. return False
  125. ############################################################
  126. # Validates the jsonString is a JSON object that has an "id"
  127. # and a "randomNumber" key, and that both keys map to
  128. # integers.
  129. ############################################################
  130. def validateDb(self, jsonString, out, err):
  131. try:
  132. obj = json.loads(jsonString)
  133. if not obj:
  134. return False
  135. if type(obj) != dict:
  136. return False
  137. if not obj["id"] or type(obj["id"]) != int:
  138. return False
  139. if not obj["randomNumber"] or type(obj["randomNumber"]) != int:
  140. return False
  141. return True
  142. except:
  143. err.write(textwrap.dedent("""
  144. -----------------------------------------------------
  145. Error: validateDb raised exception
  146. -----------------------------------------------------
  147. {trace}
  148. """.format( trace=sys.exc_info()[:2])))
  149. return False
  150. ############################################################
  151. # Validates the jsonString is an array with a length of
  152. # 2, that each entry in the array is a JSON object, that
  153. # each object has an "id" and a "randomNumber" key, and that
  154. # both keys map to integers.
  155. ############################################################
  156. def validateQuery(self, jsonString, out, err):
  157. try:
  158. arr = json.loads(jsonString)
  159. if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
  160. return False
  161. if not arr[0]["id"] or type(arr[0]["id"]) != int:
  162. return False
  163. if not arr[0]["randomNumber"] or type(arr[0]["randomNumber"]) != int:
  164. return False
  165. if not arr[1]["id"] or type(arr[1]["id"]) != int:
  166. return False
  167. if not arr[1]["randomNumber"] or type(arr[1]["randomNumber"]) != int:
  168. return False
  169. return True
  170. except:
  171. err.write(textwrap.dedent("""
  172. -----------------------------------------------------
  173. Error: validateQuery raised exception
  174. -----------------------------------------------------
  175. {trace}
  176. """.format( trace=sys.exc_info()[:2])))
  177. return False
  178. ############################################################
  179. # Parses the given HTML string and asks a FortuneHTMLParser
  180. # whether the parsed string is a valid fortune return.
  181. ############################################################
  182. def validateFortune(self, htmlString, out, err):
  183. try:
  184. parser = FortuneHTMLParser()
  185. parser.feed(htmlString)
  186. return parser.isValidFortune()
  187. except:
  188. err.write(textwrap.dedent("""
  189. -----------------------------------------------------
  190. Error: validateFortune raised exception
  191. -----------------------------------------------------
  192. {trace}
  193. """.format( trace=sys.exc_info()[:2])))
  194. return False
  195. ############################################################
  196. # Validates the jsonString is an array with a length of
  197. # 2, that each entry in the array is a JSON object, that
  198. # each object has an "id" and a "randomNumber" key, and that
  199. # both keys map to integers.
  200. ############################################################
  201. def validateUpdate(self, jsonString, out, err):
  202. try:
  203. arr = json.loads(jsonString)
  204. if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
  205. return False
  206. if not arr[0]["id"] or type(arr[0]["id"]) != int:
  207. return False
  208. if not arr[0]["randomNumber"] or type(arr[0]["randomNumber"]) != int:
  209. return False
  210. if not arr[1]["id"] or type(arr[1]["id"]) != int:
  211. return False
  212. if not arr[1]["randomNumber"] or type(arr[1]["randomNumber"]) != int:
  213. return False
  214. return True
  215. except:
  216. err.write(textwrap.dedent("""
  217. -----------------------------------------------------
  218. Error: validateUpdate raised exception
  219. -----------------------------------------------------
  220. {trace}
  221. """.format( trace=sys.exc_info()[:2])))
  222. return False
  223. ############################################################
  224. #
  225. ############################################################
  226. def validatePlaintext(self, jsonString, out, err):
  227. try:
  228. return jsonString.lower().strip() == "hello, world!"
  229. except:
  230. err.write(textwrap.dedent("""
  231. -----------------------------------------------------
  232. Error: validatePlaintext raised exception
  233. -----------------------------------------------------
  234. {trace}
  235. """.format( trace=sys.exc_info()[:2])))
  236. return False
  237. ############################################################
  238. # start(benchmarker)
# Start the test using its setup file
  240. ############################################################
  241. def start(self, out, err):
  242. return self.setup_module.start(self.benchmarker, out, err)
  243. ############################################################
  244. # End start
  245. ############################################################
  246. ############################################################
  247. # stop(benchmarker)
# Stops the test using its setup file
  249. ############################################################
  250. def stop(self, out, err):
  251. return self.setup_module.stop(out, err)
  252. ############################################################
  253. # End stop
  254. ############################################################
  255. ############################################################
  256. # verify_urls
# Verifies each of the URLs for this test. This will simply
# curl the URL and check its return status.
  259. # For each url, a flag will be set on this object for whether
  260. # or not it passed
  261. ############################################################
  262. def verify_urls(self, out, err):
  263. # JSON
  264. if self.runTests[self.JSON]:
  265. out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
  266. out.flush()
  267. try:
  268. url = self.benchmarker.generate_url(self.json_url, self.port)
  269. output = self.__curl_url(url, self.JSON, out, err)
  270. if self.validateJson(output, out, err):
  271. self.json_url_passed = True
  272. else:
  273. self.json_url_passed = False
  274. except (AttributeError, subprocess.CalledProcessError) as e:
  275. self.json_url_passed = False
  276. out.write("VALIDATING JSON ... ")
  277. if self.json_url_passed:
  278. out.write("PASS\n")
  279. else:
  280. out.write("FAIL\n")
  281. out.flush
  282. # DB
  283. if self.runTests[self.DB]:
  284. out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
  285. out.flush()
  286. try:
  287. url = self.benchmarker.generate_url(self.db_url, self.port)
  288. output = self.__curl_url(url, self.DB, out, err)
  289. if self.validateDb(output, out, err):
  290. self.db_url_passed = True
  291. else:
  292. self.db_url_passed = False
  293. except (AttributeError, subprocess.CalledProcessError) as e:
  294. self.db_url_passed = False
  295. out.write("VALIDATING DB ... ")
  296. if self.db_url_passed:
  297. out.write("PASS\n")
  298. else:
  299. out.write("FAIL\n")
  300. out.flush
  301. # Query
  302. if self.runTests[self.QUERY]:
  303. out.write( "VERIFYING QUERY (" + self.query_url + "2) ...\n" )
  304. out.flush()
  305. try:
  306. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  307. output = self.__curl_url(url, self.QUERY, out, err)
  308. if self.validateQuery(output, out, err):
  309. self.query_url_passed = True
  310. else:
  311. self.query_url_passed = False
  312. except (AttributeError, subprocess.CalledProcessError) as e:
  313. self.query_url_passed = False
  314. out.write("VALIDATING QUERY ... ")
  315. if self.query_url_passed:
  316. out.write("PASS\n")
  317. else:
  318. out.write("FAIL\n")
  319. out.flush
  320. # Fortune
  321. if self.runTests[self.FORTUNE]:
  322. out.write( "VERIFYING FORTUNE (" + self.fortune_url + ") ...\n" )
  323. out.flush()
  324. try:
  325. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  326. output = self.__curl_url(url, self.FORTUNE, out, err)
  327. if self.validateFortune(output, out, err):
  328. self.fortune_url_passed = True
  329. else:
  330. self.fortune_url_passed = False
  331. except (AttributeError, subprocess.CalledProcessError) as e:
  332. self.fortune_url_passed = False
  333. out.write("VALIDATING FORTUNE ... ")
  334. if self.fortune_url_passed:
  335. out.write("PASS\n")
  336. else:
  337. out.write("FAIL\n")
  338. out.flush
  339. # Update
  340. if self.runTests[self.UPDATE]:
  341. out.write( "VERIFYING UPDATE (" + self.update_url + "2) ...\n" )
  342. out.flush()
  343. try:
  344. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  345. output = self.__curl_url(url, self.UPDATE, out, err)
  346. if self.validateUpdate(output, out, err):
  347. self.update_url_passed = True
  348. else:
  349. self.update_url_passed = False
  350. except (AttributeError, subprocess.CalledProcessError) as e:
  351. self.update_url_passed = False
  352. out.write("VALIDATING UPDATE ... ")
  353. if self.update_url_passed:
  354. out.write("PASS\n")
  355. else:
  356. out.write("FAIL\n")
  357. out.flush
  358. # plaintext
  359. if self.runTests[self.PLAINTEXT]:
  360. out.write( "VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n" )
  361. out.flush()
  362. try:
  363. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  364. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  365. if self.validatePlaintext(output, out, err):
  366. self.plaintext_url_passed = True
  367. else:
  368. self.plaintext_url_passed = False
  369. except (AttributeError, subprocess.CalledProcessError) as e:
  370. self.plaintext_url_passed = False
  371. out.write("VALIDATING PLAINTEXT ... ")
  372. if self.plaintext_url_passed:
  373. out.write("PASS\n")
  374. else:
  375. out.write("FAIL\n")
  376. out.flush
  377. ############################################################
  378. # End verify_urls
  379. ############################################################
  380. ############################################################
  381. # contains_type(type)
  382. # true if this test contains an implementation of the given
  383. # test type (json, db, etc.)
  384. ############################################################
  385. def contains_type(self, type):
  386. try:
  387. if type == self.JSON and self.json_url != None:
  388. return True
  389. if type == self.DB and self.db_url != None:
  390. return True
  391. if type == self.QUERY and self.query_url != None:
  392. return True
  393. if type == self.FORTUNE and self.fortune_url != None:
  394. return True
  395. if type == self.UPDATE and self.update_url != None:
  396. return True
  397. if type == self.PLAINTEXT and self.plaintext_url != None:
  398. return True
  399. except AttributeError:
  400. pass
  401. return False
  402. ############################################################
# End contains_type
  404. ############################################################
  405. ############################################################
  406. # benchmark
  407. # Runs the benchmark for each type of test that it implements
  408. # JSON/DB/Query.
  409. ############################################################
  410. def benchmark(self, out, err):
  411. # JSON
  412. if self.runTests[self.JSON]:
  413. try:
  414. if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
  415. out.write("BENCHMARKING JSON ... ")
  416. out.flush()
  417. results = None
  418. if self.json_url_passed:
  419. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  420. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
  421. results = self.__parse_test(self.JSON)
  422. else:
  423. results = dict()
  424. results['results'] = []
  425. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'], passed=self.json_url_passed)
  426. out.write( "Complete\n" )
  427. out.flush()
  428. except AttributeError:
  429. pass
  430. # DB
  431. if self.runTests[self.DB]:
  432. try:
  433. if self.benchmarker.type == "all" or self.benchmarker.type == self.DB:
  434. out.write("BENCHMARKING DB ... ")
  435. out.flush()
  436. results = None
  437. if self.db_url_passed:
  438. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  439. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
  440. results = self.__parse_test(self.DB)
  441. else:
  442. results = dict()
  443. results['results'] = []
  444. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'], passed=self.db_url_passed)
  445. out.write( "Complete\n" )
  446. except AttributeError:
  447. pass
  448. # Query
  449. if self.runTests[self.QUERY]:
  450. try:
  451. if self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY:
  452. out.write("BENCHMARKING Query ... ")
  453. out.flush()
  454. results = None
  455. if self.query_url_passed:
  456. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  457. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
  458. results = self.__parse_test(self.QUERY)
  459. else:
  460. results = dict()
  461. results['results'] = []
  462. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'], passed=self.query_url_passed)
  463. out.write( "Complete\n" )
  464. out.flush()
  465. except AttributeError:
  466. pass
  467. # fortune
  468. if self.runTests[self.FORTUNE]:
  469. try:
  470. if self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE:
  471. out.write("BENCHMARKING Fortune ... ")
  472. out.flush()
  473. results = None
  474. if self.fortune_url_passed:
  475. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  476. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
  477. results = self.__parse_test(self.FORTUNE)
  478. else:
  479. results = dict()
  480. results['results'] = []
  481. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'], passed=self.fortune_url_passed)
  482. out.write( "Complete\n" )
  483. out.flush()
  484. except AttributeError:
  485. pass
  486. # update
  487. if self.runTests[self.UPDATE]:
  488. try:
  489. if self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE:
  490. out.write("BENCHMARKING Update ... ")
  491. out.flush()
  492. results = None
  493. if self.update_url_passed:
  494. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  495. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
  496. results = self.__parse_test(self.UPDATE)
  497. else:
  498. results = dict()
  499. results['results'] = []
  500. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'], passed=self.update_url_passed)
  501. out.write( "Complete\n" )
  502. out.flush()
  503. except AttributeError:
  504. pass
  505. # plaintext
  506. if self.runTests[self.PLAINTEXT]:
  507. try:
  508. if self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT:
  509. out.write("BENCHMARKING Plaintext ... ")
  510. out.flush()
  511. results = None
  512. if self.plaintext_url_passed:
  513. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
  514. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
  515. results = self.__parse_test(self.PLAINTEXT)
  516. else:
  517. results = dict()
  518. results['results'] = []
  519. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'], passed=self.plaintext_url_passed)
  520. out.write( "Complete\n" )
  521. out.flush()
  522. except AttributeError:
  523. traceback.print_exc()
  524. pass
  525. ############################################################
  526. # End benchmark
  527. ############################################################
  528. ############################################################
  529. # parse_all
  530. # Method meant to be run for a given timestamp
  531. ############################################################
  532. def parse_all(self):
  533. # JSON
  534. if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
  535. results = self.__parse_test(self.JSON)
  536. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  537. # DB
  538. if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
  539. results = self.__parse_test(self.DB)
  540. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  541. # Query
  542. if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
  543. results = self.__parse_test(self.QUERY)
  544. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  545. # Fortune
  546. if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
  547. results = self.__parse_test(self.FORTUNE)
  548. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  549. # Update
  550. if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
  551. results = self.__parse_test(self.UPDATE)
  552. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  553. # Plaintext
  554. if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
  555. results = self.__parse_test(self.PLAINTEXT)
  556. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  557. ############################################################
  558. # End parse_all
  559. ############################################################
  560. ############################################################
  561. # __parse_test(test_type)
  562. ############################################################
    def __parse_test(self, test_type):
        """
        Parses the raw wrk output file previously captured for the
        given test type.

        Returns {'results': [...]}, one dict per measured
        concurrency/query level, holding latency strings, the total
        request count, socket-error counts, and non-2xx/3xx counts.
        Returns None if the output file cannot be opened.
        """
        try:
            results = dict()
            results['results'] = []
            with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
                # Primer/warmup sections are skipped; a fresh result dict is
                # started when a "Queries:"/"Concurrency:" banner (emitted by
                # the generated bash script) is seen.
                is_warmup = True
                rawData = None
                for line in raw_data:
                    if "Queries:" in line or "Concurrency:" in line:
                        is_warmup = False
                        rawData = None
                        continue
                    if "Warmup" in line or "Primer" in line:
                        is_warmup = True
                        continue
                    if not is_warmup:
                        if rawData == None:
                            rawData = dict()
                            results['results'].append(rawData)
                        #if "Requests/sec:" in line:
                        #    m = re.search("Requests/sec:\s+([0-9]+)", line)
                        #    rawData['reportedResults'] = m.group(1)
                        # search for weighttp data such as succeeded and failed.
                        if "Latency" in line:
                            # NOTE(review): [us|ms|s|m|%] is a character class,
                            # not an alternation — it matches any single one of
                            # those characters; appears to work for wrk output.
                            m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                            if len(m) == 4:
                                rawData['latencyAvg'] = m[0]
                                rawData['latencyStdev'] = m[1]
                                rawData['latencyMax'] = m[2]
                                # rawData['latencyStdevPercent'] = m[3]
                        #if "Req/Sec" in line:
                        #    m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
                        #    if len(m) == 4:
                        #        rawData['requestsAvg'] = m[0]
                        #        rawData['requestsStdev'] = m[1]
                        #        rawData['requestsMax'] = m[2]
                        #        rawData['requestsStdevPercent'] = m[3]
                        #if "requests in" in line:
                        #    m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
                        #    if m != None:
                        #        # parse out the raw time, which may be in minutes or seconds
                        #        raw_time = m.group(1)
                        #        if "ms" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
                        #        elif "s" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1])
                        #        elif "m" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
                        #        elif "h" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
                        if "requests in" in line:
                            m = re.search("([0-9]+) requests in", line)
                            if m != None:
                                rawData['totalRequests'] = int(m.group(1))
                        if "Socket errors" in line:
                            # wrk prints e.g. "Socket errors: connect 0, read 1, write 0, timeout 2"
                            if "connect" in line:
                                m = re.search("connect ([0-9]+)", line)
                                rawData['connect'] = int(m.group(1))
                            if "read" in line:
                                m = re.search("read ([0-9]+)", line)
                                rawData['read'] = int(m.group(1))
                            if "write" in line:
                                m = re.search("write ([0-9]+)", line)
                                rawData['write'] = int(m.group(1))
                            if "timeout" in line:
                                m = re.search("timeout ([0-9]+)", line)
                                rawData['timeout'] = int(m.group(1))
                        if "Non-2xx" in line:
                            m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                            if m != None:
                                rawData['5xx'] = int(m.group(1))
            return results
        except IOError:
            return None
  637. ############################################################
# End __parse_test
  639. ############################################################
  640. ##########################################################################################
  641. # Private Methods
  642. ##########################################################################################
  643. ############################################################
  644. # __run_benchmark(script, output_file)
  645. # Runs a single benchmark using the script which is a bash
  646. # template that uses weighttp to run the test. All the results
  647. # outputed to the output_file.
  648. ############################################################
  649. def __run_benchmark(self, script, output_file, err):
  650. with open(output_file, 'w') as raw_file:
  651. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  652. p.communicate(script)
  653. err.flush()
  654. ############################################################
  655. # End __run_benchmark
  656. ############################################################
  657. ############################################################
  658. # __generate_concurrency_script(url, port)
  659. # Generates the string containing the bash script that will
  660. # be run on the client to benchmark a single test. This
  661. # specifically works for the variable concurrency tests (JSON
  662. # and DB)
  663. ############################################################
  664. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  665. if len(intervals) == 0:
  666. intervals = self.benchmarker.concurrency_levels
  667. headers = self.__get_request_headers(accept_header)
  668. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  669. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  670. interval=" ".join("{}".format(item) for item in intervals),
  671. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  672. pipeline=pipeline)
  673. ############################################################
  674. # End __generate_concurrency_script
  675. ############################################################
  676. ############################################################
  677. # __generate_query_script(url, port)
  678. # Generates the string containing the bash script that will
  679. # be run on the client to benchmark a single test. This
  680. # specifically works for the variable query tests (Query)
  681. ############################################################
  682. def __generate_query_script(self, url, port, accept_header):
  683. headers = self.__get_request_headers(accept_header)
  684. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  685. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  686. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  687. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  688. ############################################################
  689. # End __generate_query_script
  690. ############################################################
  691. ############################################################
  692. # __get_request_headers(accept_header)
  693. # Generates the complete HTTP header string
  694. ############################################################
  695. def __get_request_headers(self, accept_header):
  696. return self.headers_template.format(accept=accept_header)
  697. ############################################################
# End __get_request_headers
  699. ############################################################
  700. ############################################################
  701. # __curl_url
  702. # Dump HTTP response and headers. Throw exception if there
  703. # is an HTTP error.
  704. ############################################################
def __curl_url(self, url, testType, out, err):
    # Dump the HTTP response (status line + headers + body) for `url` to the
    # `out` stream, raise subprocess.CalledProcessError on an HTTP error, and
    # return the response body as a string.
    # NOTE(review): `testType` is not used anywhere in this body.
    #
    # Use -i to output response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use --stderr - to redirect stderr to stdout so we get
    # error output for sure in stdout.
    # Use -sS to hide progress bar, but show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write( "\n" )
    out.flush()
    # Second invocation: fetch just the body so we can return it to the caller.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional (third) invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write( "\n" )
    out.flush()
    # NOTE(review): Popen.communicate() returns a (stdout, stderr) tuple,
    # which is always truthy, so this guard is effectively unconditional;
    # output[0] (the body) is returned regardless of whether it is empty.
    if output:
        # We have the response body - return it
        return output[0]
  736. ##############################################################
  737. # End __curl_url
  738. ##############################################################
  739. ##########################################################################################
  740. # Constructor
  741. ##########################################################################################
  742. def __init__(self, name, directory, benchmarker, runTests, args):
  743. self.name = name
  744. self.directory = directory
  745. self.benchmarker = benchmarker
  746. self.runTests = runTests
  747. self.__dict__.update(args)
  748. # ensure directory has __init__.py file so that we can use it as a Python package
  749. if not os.path.exists(os.path.join(directory, "__init__.py")):
  750. open(os.path.join(directory, "__init__.py"), 'w').close()
  751. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  752. ############################################################
  753. # End __init__
  754. ############################################################
  755. ############################################################
  756. # End FrameworkTest
  757. ############################################################
  758. ##########################################################################################
  759. # Static methods
  760. ##########################################################################################
  761. ##############################################################
  762. # parse_config(config, directory, benchmarker)
  763. # parses a config file and returns a list of FrameworkTest
  764. # objects based on that config file.
  765. ##############################################################
  766. def parse_config(config, directory, benchmarker):
  767. tests = []
  768. # The config object can specify multiple tests, we neep to loop
  769. # over them and parse them out
  770. for test in config['tests']:
  771. for key, value in test.iteritems():
  772. test_name = config['framework']
  773. runTests = dict()
  774. runTests["json"] = True if value.get("json_url", False) else False
  775. runTests["db"] = True if value.get("db_url", False) else False
  776. runTests["query"] = True if value.get("query_url", False) else False
  777. runTests["fortune"] = True if value.get("fortune_url", False) else False
  778. runTests["update"] = True if value.get("update_url", False) else False
  779. runTests["plaintext"] = True if value.get("plaintext_url", False) else False
  780. # if the test uses the 'defualt' keywork, then we don't
  781. # append anything to it's name. All configs should only have 1 default
  782. if key != 'default':
  783. # we need to use the key in the test_name
  784. test_name = test_name + "-" + key
  785. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  786. return tests
  787. ##############################################################
  788. # End parse_config
  789. ##############################################################