# framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json
import textwrap


class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
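  # For illustration only: formatting headers_template with accept_json yields
  # roughly
  #   -H 'Host: localhost' -H 'Accept: application/json,...;q=0.7' -H 'Connection: keep-alive'
  # (Accept value elided here for brevity). The same string is built by
  # __get_request_headers() below and passed verbatim to wrk on the client.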
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      sleep 2
    done
    """
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
    """
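  # Note that query_template expects {url} to end right before the query count,
  # so that appending a number forms a complete request. For example, a
  # query_url of "/queries?queries=" (illustrative value) becomes
  # ".../queries?queries=2" for the primer and warmup runs and
  # ".../queries?queries=$c" inside the loop; concurrency_template uses {url}
  # unchanged.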
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates the jsonString is a JSON object with a 'message'
  # key with the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      if obj["message"].lower() == "hello, world!":
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateJson raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False
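  # For example, a body of '{"message": "Hello, World!"}' passes this check,
  # while '{"greeting": "Hello, World!"}' raises a KeyError and is reported as
  # a failure (payloads here are illustrative only).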

  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      # We are allowing the single-object array for the DB
      # test for now, but will likely remove this later.
      if type(obj) == list:
        obj = obj[0]
      # This will raise if a value cannot be parsed as a
      # float (it works with ints, but turns them into their
      # float equivalent; i.e. "123" => 123.0).
      if (type(float(obj["id"])) == float and
          type(float(obj["randomNumber"])) == float):
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateDb raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False
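  # A typical passing body looks like '{"id": 3217, "randomNumber": 2149}'
  # (values illustrative); a single-element array wrapping such an object is
  # also accepted by the list check above.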

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomNumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomNumber"])) == float):
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateQuery raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 1, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQueryOneOrLess(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if len(arr) != 1:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomNumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateQueryOneOrLess raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 500, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQueryFiveHundredOrMore(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if len(arr) != 500:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomNumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateQueryFiveHundredOrMore raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune return.
  ############################################################
  def validateFortune(self, htmlString, out, err):
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      return parser.isValidFortune()
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateFortune raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomNumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomNumber"])) == float):
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateUpdate raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the response is the "hello, world!" plaintext
  # string (case-insensitive, surrounding whitespace ignored).
  ############################################################
  def validatePlaintext(self, jsonString, out, err):
    try:
      return jsonString.lower().strip() == "hello, world!"
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validatePlaintext raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################

  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each url,
  # a flag will be set on this object for whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.json_url, self.port)
        output = self.__curl_url(url, self.JSON, out, err)
        if self.validateJson(output, out, err):
          self.json_url_passed = True
        else:
          self.json_url_passed = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (JSON)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.json_url_passed = False
      out.write("VALIDATING JSON ... ")
      if self.json_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # DB
    if self.runTests[self.DB]:
      out.write("VERIFYING DB (" + self.db_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.db_url, self.port)
        output = self.__curl_url(url, self.DB, out, err)
        if self.validateDb(output, out, err):
          self.db_url_passed = True
        else:
          self.db_url_passed = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (DB)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.db_url_passed = False
      out.write("VALIDATING DB ... ")
      if self.db_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Query
    if self.runTests[self.QUERY]:
      out.write("VERIFYING QUERY (" + self.query_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.query_url + "2", self.port)
        output = self.__curl_url(url, self.QUERY, out, err)
        url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
        output2 = self.__curl_url(url2, self.QUERY, out, err)
        url3 = self.benchmarker.generate_url(self.query_url + "501", self.port)
        output3 = self.__curl_url(url3, self.QUERY, out, err)
        if self.validateQuery(output, out, err):
          self.query_url_passed = True
        else:
          self.query_url_passed = False
        if (not self.validateQueryOneOrLess(output2, out, err) or
            not self.validateQueryFiveHundredOrMore(output3, out, err)):
          self.query_url_warn = True
        else:
          self.query_url_warn = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (QUERY)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.query_url_passed = False
      out.write("VALIDATING QUERY ... ")
      if self.query_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Fortune
    if self.runTests[self.FORTUNE]:
      out.write("VERIFYING FORTUNE (" + self.fortune_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.fortune_url, self.port)
        output = self.__curl_url(url, self.FORTUNE, out, err)
        if self.validateFortune(output, out, err):
          self.fortune_url_passed = True
        else:
          self.fortune_url_passed = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (FORTUNE)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.fortune_url_passed = False
      out.write("VALIDATING FORTUNE ... ")
      if self.fortune_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Update
    if self.runTests[self.UPDATE]:
      out.write("VERIFYING UPDATE (" + self.update_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.update_url + "2", self.port)
        output = self.__curl_url(url, self.UPDATE, out, err)
        if self.validateUpdate(output, out, err):
          self.update_url_passed = True
        else:
          self.update_url_passed = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (UPDATE)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.update_url_passed = False
      out.write("VALIDATING UPDATE ... ")
      if self.update_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      out.write("VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.plaintext_url, self.port)
        output = self.__curl_url(url, self.PLAINTEXT, out, err)
        if self.validatePlaintext(output, out, err):
          self.plaintext_url_passed = True
        else:
          self.plaintext_url_passed = False
      except (AttributeError, subprocess.CalledProcessError):
        err.write(textwrap.dedent("""
          -----------------------------------------------------
            Error: verify_urls raised exception (PLAINTEXT)
          -----------------------------------------------------
          {trace}
          """.format(trace=sys.exc_info()[:2])))
        err.flush()
        self.plaintext_url_passed = False
      out.write("VALIDATING PLAINTEXT ... ")
      if self.plaintext_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()
  ############################################################
  # End verify_urls
  ############################################################

  ############################################################
  # contains_type(type)
  # true if this test contains an implementation of the given
  # test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url != None:
        return True
      if type == self.DB and self.db_url != None:
        return True
      if type == self.QUERY and self.query_url != None:
        return True
      if type == self.FORTUNE and self.fortune_url != None:
        return True
      if type == self.UPDATE and self.update_url != None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url != None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################

  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        out.write("BENCHMARKING JSON ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.JSON)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.JSON)
        self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        out.write("BENCHMARKING DB ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.DB)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.db_url_passed:
          remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.DB)
        self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
        out.write("Complete\n")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        out.write("BENCHMARKING Query ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.QUERY)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.query_url_passed:
          remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        out.write("BENCHMARKING Fortune ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.fortune_url_passed:
          remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        out.write("BENCHMARKING Update ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.UPDATE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.update_url_passed:
          remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        out.write("BENCHMARKING Plaintext ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.plaintext_url_passed:
          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384], pipeline="--pipeline 16")
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        traceback.print_exc()
        pass
  ############################################################
  # End benchmark
  ############################################################

  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################

  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []

      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue

            if not is_warmup:
              if rawData == None:
                rawData = dict()
                results['results'].append(rawData)

              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)

              # search for wrk latency data
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]

              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]

              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m != None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m != None:
                  rawData['5xx'] = int(m.group(1))

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
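  # For reference, each per-concurrency block parsed above looks roughly like
  # the following wrk output (all numbers illustrative):
  #
  #   Running 15s test @ http://server:8080/json
  #     8 threads and 256 connections
  #     Thread Stats   Avg      Stdev     Max   +/- Stdev
  #       Latency     1.43ms    2.15ms   45.67ms   92.50%
  #       Req/Sec    12.34k     1.02k    15.67k    71.00%
  #     1234567 requests in 15.00s, 190.32MB read
  #     Socket errors: connect 0, read 10, write 0, timeout 5
  #     Non-2xx or 3xx responses: 12
  #   Requests/sec:  82304.47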

  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the script, which is a bash
  # script (built from a template) that invokes wrk to run the
  # test. All of the results are written to the output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
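  # The script is not executed locally: it is fed over stdin to the command in
  # benchmarker.client_ssh_string, so the wrk runs happen on the client machine.
  # That string would typically be something like "ssh -T user@client-host"
  # (hypothetical value; the real one comes from the benchmarker configuration).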

  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
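  # As an illustration, with duration=15, max_threads=8 and concurrency levels
  # "8 16 32", the formatted template runs a 5-second primer, a warmup at the
  # maximum concurrency, and then one timed wrk invocation per level, e.g.:
  #   wrk -H 'Host: localhost' ... -d 15 -c "32" -t "8" http://server:8080/json
  # (host, port and numbers here are made up for the example).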

  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################

  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # Use -i to output response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use -sS to hide the progress bar, but still show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    # HTTP output may not end in a newline, so add that here.
    out.write("\n\n")
    out.flush()
    err.flush()

    # We need to get the response body from curl and return it.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()

    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    err.flush()

    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.__dict__.update(args)

    # ensure directory has __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()

    self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################

############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests, so we need to
  # loop over them and parse each one out.
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']

      runTests = dict()
      runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
      runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
      runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
      runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
      runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
      runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)

      # If the test uses the 'default' keyword, we don't append
      # anything to its name. All configs should have only one default.
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))

  return tests
##############################################################
# End parse_config
##############################################################
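
# A minimal sketch of the config shape parse_config expects, inferred from the
# lookups above (any fields beyond those referenced in this file are omitted,
# and the values shown are purely illustrative):
#
#   config = {
#     "framework": "example-framework",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "db_url": "/db",
#         "query_url": "/queries?queries=",
#         "plaintext_url": "/plaintext",
#         "port": 8080
#       }
#     }]
#   }
#
# Each inner dict becomes the `args` passed to FrameworkTest, whose keys are
# copied onto the instance via self.__dict__.update(args).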