framework_test.py 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. import importlib
  3. import os
  4. import subprocess
  5. import time
  6. import re
  7. import pprint
  8. import sys
  9. import traceback
  10. import json
  11. import textwrap
class FrameworkTest:
    """Describes one framework under test: its metadata, the wrk/bash
    script templates used to drive the benchmark client, and (in the
    methods below) how to verify, benchmark, and parse each test type."""
    ##########################################################################################
    # Class variables
    ##########################################################################################
    # Minimal header set sent with every request; {accept} is filled with one
    # of the accept_* strings below.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Browser-like header set (UA, cookies, language) for tests that want
    # realistic request sizes.
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    # Accept headers per test type (JSON endpoints, HTML fortune page,
    # plaintext endpoint).
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Bash script template executed on the benchmark client for the
    # variable-concurrency tests: a short primer run, a warmup run, then one
    # {wrk} run per concurrency level in {interval}.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
sleep 2
done
"""
    # Same structure as concurrency_template but the query count (appended to
    # {url}) varies instead of the concurrency level; always uses plain wrk.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
sleep 2
done
"""
    # Framework metadata; populated externally (presumably from the
    # framework's benchmark config -- not visible in this file).
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    # Target server OS name. Note: this class attribute reuses the name `os`;
    # the `os` module remains visible inside methods (class attributes do not
    # shadow globals in method bodies).
    os = None
    database_os = None
    display_name = None
    notes = None
    versus = None
    ############################################################
    # Test Variables
    ############################################################
    # Canonical test-type keys; used to index runTests and to name
    # output files.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"
    ##########################################################################################
    # Public Methods
    ##########################################################################################
  104. ############################################################
  105. # Validates the jsonString is a JSON object with a 'message'
  106. # key with the value "hello, world!" (case-insensitive).
  107. ############################################################
  108. def validateJson(self, jsonString, out, err):
  109. try:
  110. obj = json.loads(jsonString)
  111. if obj["message"].lower() == "hello, world!":
  112. return True
  113. except:
  114. err.write(textwrap.dedent("""
  115. -----------------------------------------------------
  116. Error: validateJson raised exception
  117. -----------------------------------------------------
  118. {trace}
  119. """.format( trace=sys.exc_info()[:2])))
  120. return False
  121. ############################################################
  122. # Validates the jsonString is a JSON object that has an "id"
  123. # and a "randomNumber" key, and that both keys map to
  124. # integers.
  125. ############################################################
  126. def validateDb(self, jsonString, out, err):
  127. try:
  128. obj = json.loads(jsonString)
  129. # This will error out of the value could not parsed to a
  130. # float (this will work with ints, but it will turn them
  131. # into their float equivalent; i.e. "123" => 123.0)
  132. if (type(float(obj["id"])) == float and
  133. type(float(obj["randomNumber"])) == float):
  134. return True
  135. except:
  136. err.write(textwrap.dedent("""
  137. -----------------------------------------------------
  138. Error: validateDb raised exception
  139. -----------------------------------------------------
  140. {trace}
  141. """.format( trace=sys.exc_info()[:2])))
  142. return False
  143. ############################################################
  144. # Validates the jsonString is an array with a length of
  145. # 2, that each entry in the array is a JSON object, that
  146. # each object has an "id" and a "randomNumber" key, and that
  147. # both keys map to integers.
  148. ############################################################
  149. def validateQuery(self, jsonString, out, err):
  150. try:
  151. arr = json.loads(jsonString)
  152. if (type(float(arr[0]["id"])) == float and
  153. type(float(arr[0]["randomNumber"])) == float and
  154. type(float(arr[1]["id"])) == float and
  155. type(float(arr[1]["randomNumber"])) == float):
  156. return True
  157. except:
  158. err.write(textwrap.dedent("""
  159. -----------------------------------------------------
  160. Error: validateQuery raised exception
  161. -----------------------------------------------------
  162. {trace}
  163. """.format( trace=sys.exc_info()[:2])))
  164. return False
  165. ############################################################
  166. # Validates the jsonString is an array with a length of
  167. # 1, that each entry in the array is a JSON object, that
  168. # each object has an "id" and a "randomNumber" key, and that
  169. # both keys map to integers.
  170. ############################################################
  171. def validateQueryOneOrLess(self, jsonString, out, err):
  172. try:
  173. arr = json.loads(jsonString)
  174. if len(arr) != 1:
  175. return False
  176. for obj in arr:
  177. if (type(float(obj["id"])) != float or
  178. type(float(obj["randomNumber"])) != float or
  179. type(float(obj["id"])) != float or
  180. type(float(obj["randomNumber"])) != float):
  181. return False
  182. # By here, it's passed validation
  183. return True
  184. except:
  185. err.write(textwrap.dedent("""
  186. -----------------------------------------------------
  187. Error: validateQuery raised exception
  188. -----------------------------------------------------
  189. {trace}
  190. """.format( trace=sys.exc_info()[:2])))
  191. return False
  192. ############################################################
  193. # Validates the jsonString is an array with a length of
  194. # 500, that each entry in the array is a JSON object, that
  195. # each object has an "id" and a "randomNumber" key, and that
  196. # both keys map to integers.
  197. ############################################################
  198. def validateQueryFiveHundredOrMore(self, jsonString, out, err):
  199. try:
  200. arr = json.loads(jsonString)
  201. if len(arr) != 500:
  202. return False
  203. for obj in arr:
  204. if (type(float(obj["id"])) != float or
  205. type(float(obj["randomNumber"])) != float or
  206. type(float(obj["id"])) != float or
  207. type(float(obj["randomNumber"])) != float):
  208. return False
  209. # By here, it's passed validation
  210. return True
  211. except:
  212. err.write(textwrap.dedent("""
  213. -----------------------------------------------------
  214. Error: validateQuery raised exception
  215. -----------------------------------------------------
  216. {trace}
  217. """.format( trace=sys.exc_info()[:2])))
  218. return False
  219. ############################################################
  220. # Parses the given HTML string and asks a FortuneHTMLParser
  221. # whether the parsed string is a valid fortune return.
  222. ############################################################
  223. def validateFortune(self, htmlString, out, err):
  224. try:
  225. parser = FortuneHTMLParser()
  226. parser.feed(htmlString)
  227. return parser.isValidFortune()
  228. except:
  229. err.write(textwrap.dedent("""
  230. -----------------------------------------------------
  231. Error: validateFortune raised exception
  232. -----------------------------------------------------
  233. {trace}
  234. """.format( trace=sys.exc_info()[:2])))
  235. return False
  236. ############################################################
  237. # Validates the jsonString is an array with a length of
  238. # 2, that each entry in the array is a JSON object, that
  239. # each object has an "id" and a "randomNumber" key, and that
  240. # both keys map to integers.
  241. ############################################################
  242. def validateUpdate(self, jsonString, out, err):
  243. try:
  244. arr = json.loads(jsonString)
  245. if (type(float(arr[0]["id"])) == float and
  246. type(float(arr[0]["randomNumber"])) == float and
  247. type(float(arr[1]["id"])) == float and
  248. type(float(arr[1]["randomNumber"])) == float):
  249. return True
  250. except:
  251. err.write(textwrap.dedent("""
  252. -----------------------------------------------------
  253. Error: validateUpdate raised exception
  254. -----------------------------------------------------
  255. {trace}
  256. """.format( trace=sys.exc_info()[:2])))
  257. return False
  258. ############################################################
  259. #
  260. ############################################################
  261. def validatePlaintext(self, jsonString, out, err):
  262. try:
  263. return jsonString.lower().strip() == "hello, world!"
  264. except:
  265. err.write(textwrap.dedent("""
  266. -----------------------------------------------------
  267. Error: validatePlaintext raised exception
  268. -----------------------------------------------------
  269. {trace}
  270. """.format( trace=sys.exc_info()[:2])))
  271. return False
  272. ############################################################
  273. # start(benchmarker)
  274. # Start the test using it's setup file
  275. ############################################################
  276. def start(self, out, err):
  277. return self.setup_module.start(self.benchmarker, out, err)
  278. ############################################################
  279. # End start
  280. ############################################################
  281. ############################################################
  282. # stop(benchmarker)
  283. # Stops the test using it's setup file
  284. ############################################################
  285. def stop(self, out, err):
  286. return self.setup_module.stop(out, err)
  287. ############################################################
  288. # End stop
  289. ############################################################
  290. ############################################################
  291. # verify_urls
  292. # Verifys each of the URLs for this test. THis will sinply
  293. # curl the URL and check for it's return status.
  294. # For each url, a flag will be set on this object for whether
  295. # or not it passed
  296. ############################################################
  297. def verify_urls(self, out, err):
  298. # JSON
  299. if self.runTests[self.JSON]:
  300. out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
  301. out.flush()
  302. try:
  303. url = self.benchmarker.generate_url(self.json_url, self.port)
  304. output = self.__curl_url(url, self.JSON, out, err)
  305. if self.validateJson(output, out, err):
  306. self.json_url_passed = True
  307. else:
  308. self.json_url_passed = False
  309. except (AttributeError, subprocess.CalledProcessError) as e:
  310. self.json_url_passed = False
  311. out.write("VALIDATING JSON ... ")
  312. if self.json_url_passed:
  313. out.write("PASS\n\n")
  314. else:
  315. out.write("FAIL\n\n")
  316. out.flush
  317. # DB
  318. if self.runTests[self.DB]:
  319. out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
  320. out.flush()
  321. try:
  322. url = self.benchmarker.generate_url(self.db_url, self.port)
  323. output = self.__curl_url(url, self.DB, out, err)
  324. if self.validateDb(output, out, err):
  325. self.db_url_passed = True
  326. else:
  327. self.db_url_passed = False
  328. except (AttributeError, subprocess.CalledProcessError) as e:
  329. self.db_url_passed = False
  330. out.write("VALIDATING DB ... ")
  331. if self.db_url_passed:
  332. out.write("PASS\n\n")
  333. else:
  334. out.write("FAIL\n\n")
  335. out.flush
  336. # Query
  337. if self.runTests[self.QUERY]:
  338. out.write( "VERIFYING QUERY (" + self.query_url + "2) ...\n" )
  339. out.flush()
  340. try:
  341. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  342. output = self.__curl_url(url, self.QUERY, out, err)
  343. url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
  344. output2 = self.__curl_url(url2, self.QUERY, out, err)
  345. url3 = self.benchmarker.generate_url(self.query_url + "501", self.port)
  346. output3 = self.__curl_url(url3, self.QUERY, out, err)
  347. if (self.validateQuery(output, out, err) and
  348. self.validateQueryOneOrLess(output2, out, err) and
  349. self.validateQueryFiveHundredOrMore(output3, out, err)):
  350. self.query_url_passed = True
  351. else:
  352. self.query_url_passed = False
  353. except (AttributeError, subprocess.CalledProcessError) as e:
  354. self.query_url_passed = False
  355. out.write("VALIDATING QUERY ... ")
  356. if self.query_url_passed:
  357. out.write("PASS\n\n")
  358. else:
  359. out.write("FAIL\n\n")
  360. out.flush
  361. # Fortune
  362. if self.runTests[self.FORTUNE]:
  363. out.write( "VERIFYING FORTUNE (" + self.fortune_url + ") ...\n" )
  364. out.flush()
  365. try:
  366. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  367. output = self.__curl_url(url, self.FORTUNE, out, err)
  368. if self.validateFortune(output, out, err):
  369. self.fortune_url_passed = True
  370. else:
  371. self.fortune_url_passed = False
  372. except (AttributeError, subprocess.CalledProcessError) as e:
  373. self.fortune_url_passed = False
  374. out.write("VALIDATING FORTUNE ... ")
  375. if self.fortune_url_passed:
  376. out.write("PASS\n\n")
  377. else:
  378. out.write("FAIL\n\n")
  379. out.flush
  380. # Update
  381. if self.runTests[self.UPDATE]:
  382. out.write( "VERIFYING UPDATE (" + self.update_url + "2) ...\n" )
  383. out.flush()
  384. try:
  385. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  386. output = self.__curl_url(url, self.UPDATE, out, err)
  387. if self.validateUpdate(output, out, err):
  388. self.update_url_passed = True
  389. else:
  390. self.update_url_passed = False
  391. except (AttributeError, subprocess.CalledProcessError) as e:
  392. self.update_url_passed = False
  393. out.write("VALIDATING UPDATE ... ")
  394. if self.update_url_passed:
  395. out.write("PASS\n\n")
  396. else:
  397. out.write("FAIL\n\n")
  398. out.flush
  399. # plaintext
  400. if self.runTests[self.PLAINTEXT]:
  401. out.write( "VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n" )
  402. out.flush()
  403. try:
  404. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  405. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  406. if self.validatePlaintext(output, out, err):
  407. self.plaintext_url_passed = True
  408. else:
  409. self.plaintext_url_passed = False
  410. except (AttributeError, subprocess.CalledProcessError) as e:
  411. self.plaintext_url_passed = False
  412. out.write("VALIDATING PLAINTEXT ... ")
  413. if self.plaintext_url_passed:
  414. out.write("PASS\n\n")
  415. else:
  416. out.write("FAIL\n\n")
  417. out.flush
  418. ############################################################
  419. # End verify_urls
  420. ############################################################
  421. ############################################################
  422. # contains_type(type)
  423. # true if this test contains an implementation of the given
  424. # test type (json, db, etc.)
  425. ############################################################
  426. def contains_type(self, type):
  427. try:
  428. if type == self.JSON and self.json_url != None:
  429. return True
  430. if type == self.DB and self.db_url != None:
  431. return True
  432. if type == self.QUERY and self.query_url != None:
  433. return True
  434. if type == self.FORTUNE and self.fortune_url != None:
  435. return True
  436. if type == self.UPDATE and self.update_url != None:
  437. return True
  438. if type == self.PLAINTEXT and self.plaintext_url != None:
  439. return True
  440. except AttributeError:
  441. pass
  442. return False
  443. ############################################################
  444. # End stop
  445. ############################################################
  446. ############################################################
  447. # benchmark
  448. # Runs the benchmark for each type of test that it implements
  449. # JSON/DB/Query.
  450. ############################################################
  451. def benchmark(self, out, err):
  452. # JSON
  453. if self.runTests[self.JSON]:
  454. try:
  455. out.write("BENCHMARKING JSON ... ")
  456. out.flush()
  457. results = None
  458. output_file = self.benchmarker.output_file(self.name, self.JSON)
  459. if not os.path.exists(output_file):
  460. with open(output_file, 'w'):
  461. # Simply opening the file in write mode should create the empty file.
  462. pass
  463. if self.json_url_passed:
  464. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  465. self.__run_benchmark(remote_script, output_file, err)
  466. results = self.__parse_test(self.JSON)
  467. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  468. out.write( "Complete\n" )
  469. out.flush()
  470. except AttributeError:
  471. pass
  472. # DB
  473. if self.runTests[self.DB]:
  474. try:
  475. out.write("BENCHMARKING DB ... ")
  476. out.flush()
  477. results = None
  478. output_file = self.benchmarker.output_file(self.name, self.DB)
  479. if not os.path.exists(output_file):
  480. with open(output_file, 'w'):
  481. # Simply opening the file in write mode should create the empty file.
  482. pass
  483. if self.db_url_passed:
  484. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  485. self.__run_benchmark(remote_script, output_file, err)
  486. results = self.__parse_test(self.DB)
  487. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  488. out.write( "Complete\n" )
  489. except AttributeError:
  490. pass
  491. # Query
  492. if self.runTests[self.QUERY]:
  493. try:
  494. out.write("BENCHMARKING Query ... ")
  495. out.flush()
  496. results = None
  497. output_file = self.benchmarker.output_file(self.name, self.QUERY)
  498. if not os.path.exists(output_file):
  499. with open(output_file, 'w'):
  500. # Simply opening the file in write mode should create the empty file.
  501. pass
  502. if self.query_url_passed:
  503. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  504. self.__run_benchmark(remote_script, output_file, err)
  505. results = self.__parse_test(self.QUERY)
  506. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  507. out.write( "Complete\n" )
  508. out.flush()
  509. except AttributeError:
  510. pass
  511. # fortune
  512. if self.runTests[self.FORTUNE]:
  513. try:
  514. out.write("BENCHMARKING Fortune ... ")
  515. out.flush()
  516. results = None
  517. output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
  518. if not os.path.exists(output_file):
  519. with open(output_file, 'w'):
  520. # Simply opening the file in write mode should create the empty file.
  521. pass
  522. if self.fortune_url_passed:
  523. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  524. self.__run_benchmark(remote_script, output_file, err)
  525. results = self.__parse_test(self.FORTUNE)
  526. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  527. out.write( "Complete\n" )
  528. out.flush()
  529. except AttributeError:
  530. pass
  531. # update
  532. if self.runTests[self.UPDATE]:
  533. try:
  534. out.write("BENCHMARKING Update ... ")
  535. out.flush()
  536. results = None
  537. output_file = self.benchmarker.output_file(self.name, self.UPDATE)
  538. if not os.path.exists(output_file):
  539. with open(output_file, 'w'):
  540. # Simply opening the file in write mode should create the empty file.
  541. pass
  542. if self.update_url_passed:
  543. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  544. self.__run_benchmark(remote_script, output_file, err)
  545. results = self.__parse_test(self.UPDATE)
  546. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  547. out.write( "Complete\n" )
  548. out.flush()
  549. except AttributeError:
  550. pass
  551. # plaintext
  552. if self.runTests[self.PLAINTEXT]:
  553. try:
  554. out.write("BENCHMARKING Plaintext ... ")
  555. out.flush()
  556. results = None
  557. output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
  558. if not os.path.exists(output_file):
  559. with open(output_file, 'w'):
  560. # Simply opening the file in write mode should create the empty file.
  561. pass
  562. if self.plaintext_url_passed:
  563. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
  564. self.__run_benchmark(remote_script, output_file, err)
  565. results = self.__parse_test(self.PLAINTEXT)
  566. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  567. out.write( "Complete\n" )
  568. out.flush()
  569. except AttributeError:
  570. traceback.print_exc()
  571. pass
  572. ############################################################
  573. # End benchmark
  574. ############################################################
  575. ############################################################
  576. # parse_all
  577. # Method meant to be run for a given timestamp
  578. ############################################################
  579. def parse_all(self):
  580. # JSON
  581. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  582. results = self.__parse_test(self.JSON)
  583. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  584. # DB
  585. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  586. results = self.__parse_test(self.DB)
  587. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  588. # Query
  589. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  590. results = self.__parse_test(self.QUERY)
  591. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  592. # Fortune
  593. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  594. results = self.__parse_test(self.FORTUNE)
  595. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  596. # Update
  597. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  598. results = self.__parse_test(self.UPDATE)
  599. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  600. # Plaintext
  601. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  602. results = self.__parse_test(self.PLAINTEXT)
  603. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  604. ############################################################
  605. # End parse_all
  606. ############################################################
    ############################################################
    # __parse_test(test_type)
    # Parses the captured wrk output for one test type into
    # per-run statistics (latency, total requests, socket errors,
    # non-2xx counts).
    ############################################################
    def __parse_test(self, test_type):
        """Parse the raw benchmark output file for test_type.

        Returns {'results': [rawData, ...]} with one dict per measured
        concurrency/query level, or None when the file cannot be read
        (IOError)."""
        try:
            results = dict()
            results['results'] = []
            # NOTE(review): existence is checked via get_output_file() but the
            # file is then opened via output_file() -- presumably both return
            # the same path; confirm against the benchmarker implementation.
            if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
                with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
                    # Simple line-by-line state machine over the wrk output:
                    # is_warmup gates whether stats lines are recorded, and
                    # rawData is the dict currently being filled (one per
                    # measured section).
                    is_warmup = True
                    rawData = None
                    for line in raw_data:
                        # A "Queries:"/"Concurrency:" banner starts a measured
                        # section; drop the current dict so a fresh one is
                        # created on the next stats line.
                        if "Queries:" in line or "Concurrency:" in line:
                            is_warmup = False
                            rawData = None
                            continue
                        # Primer/warmup output is skipped entirely.
                        if "Warmup" in line or "Primer" in line:
                            is_warmup = True
                            continue
                        if not is_warmup:
                            if rawData == None:
                                rawData = dict()
                                results['results'].append(rawData)
                            #if "Requests/sec:" in line:
                            #  m = re.search("Requests/sec:\s+([0-9]+)", line)
                            #  rawData['reportedResults'] = m.group(1)
                            # search for weighttp data such as succeeded and failed.
                            if "Latency" in line:
                                # NOTE(review): [us|ms|s|m|%] is a regex character
                                # class (the individual chars u, s, |, m, %), not
                                # alternation of units -- it happens to match the
                                # unit suffixes wrk emits, so behavior is kept.
                                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                                if len(m) == 4:
                                    rawData['latencyAvg'] = m[0]
                                    rawData['latencyStdev'] = m[1]
                                    rawData['latencyMax'] = m[2]
                                    # rawData['latencyStdevPercent'] = m[3]
                            #if "Req/Sec" in line:
                            #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
                            #  if len(m) == 4:
                            #    rawData['requestsAvg'] = m[0]
                            #    rawData['requestsStdev'] = m[1]
                            #    rawData['requestsMax'] = m[2]
                            #    rawData['requestsStdevPercent'] = m[3]
                            #if "requests in" in line:
                            #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
                            #  if m != None:
                            #    # parse out the raw time, which may be in minutes or seconds
                            #    raw_time = m.group(1)
                            #    if "ms" in raw_time:
                            #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
                            #    elif "s" in raw_time:
                            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
                            #    elif "m" in raw_time:
                            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
                            #    elif "h" in raw_time:
                            #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
                            if "requests in" in line:
                                m = re.search("([0-9]+) requests in", line)
                                if m != None:
                                    rawData['totalRequests'] = int(m.group(1))
                            # Per-kind socket error counters from wrk's
                            # "Socket errors: connect N, read N, ..." line.
                            if "Socket errors" in line:
                                if "connect" in line:
                                    m = re.search("connect ([0-9]+)", line)
                                    rawData['connect'] = int(m.group(1))
                                if "read" in line:
                                    m = re.search("read ([0-9]+)", line)
                                    rawData['read'] = int(m.group(1))
                                if "write" in line:
                                    m = re.search("write ([0-9]+)", line)
                                    rawData['write'] = int(m.group(1))
                                if "timeout" in line:
                                    m = re.search("timeout ([0-9]+)", line)
                                    rawData['timeout'] = int(m.group(1))
                            # Key '5xx' is kept as-is even though the counter
                            # also covers other non-2xx/3xx responses.
                            if "Non-2xx" in line:
                                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                                if m != None:
                                    rawData['5xx'] = int(m.group(1))
            return results
        except IOError:
            return None
    ############################################################
    # End __parse_test
    ############################################################
  688. ##########################################################################################
  689. # Private Methods
  690. ##########################################################################################
  691. ############################################################
# __run_benchmark(script, output_file, err)
# Runs a single benchmark using the script, which is a bash
# template that uses weighttp to run the test. All the results
# are written to the output_file; stderr goes to err.
  696. ############################################################
  697. def __run_benchmark(self, script, output_file, err):
  698. with open(output_file, 'w') as raw_file:
  699. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  700. p.communicate(script)
  701. err.flush()
  702. ############################################################
  703. # End __run_benchmark
  704. ############################################################
  705. ############################################################
  706. # __generate_concurrency_script(url, port)
  707. # Generates the string containing the bash script that will
  708. # be run on the client to benchmark a single test. This
  709. # specifically works for the variable concurrency tests (JSON
  710. # and DB)
  711. ############################################################
  712. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  713. if len(intervals) == 0:
  714. intervals = self.benchmarker.concurrency_levels
  715. headers = self.__get_request_headers(accept_header)
  716. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  717. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  718. interval=" ".join("{}".format(item) for item in intervals),
  719. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  720. pipeline=pipeline)
  721. ############################################################
  722. # End __generate_concurrency_script
  723. ############################################################
  724. ############################################################
  725. # __generate_query_script(url, port)
  726. # Generates the string containing the bash script that will
  727. # be run on the client to benchmark a single test. This
  728. # specifically works for the variable query tests (Query)
  729. ############################################################
  730. def __generate_query_script(self, url, port, accept_header):
  731. headers = self.__get_request_headers(accept_header)
  732. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  733. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  734. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  735. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  736. ############################################################
  737. # End __generate_query_script
  738. ############################################################
  739. ############################################################
  740. # __get_request_headers(accept_header)
  741. # Generates the complete HTTP header string
  742. ############################################################
  743. def __get_request_headers(self, accept_header):
  744. return self.headers_template.format(accept=accept_header)
  745. ############################################################
# End __get_request_headers
  747. ############################################################
  748. ############################################################
  749. # __curl_url
  750. # Dump HTTP response and headers. Throw exception if there
  751. # is an HTTP error.
  752. ############################################################
  753. def __curl_url(self, url, testType, out, err):
  754. # Use -i to output response with headers.
  755. # Don't use -f so that the HTTP response code is ignored.
  756. # Use --stderr - to redirect stderr to stdout so we get
  757. # error output for sure in stdout.
  758. # Use -sS to hide progress bar, but show errors.
  759. subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
  760. # HTTP output may not end in a newline, so add that here.
  761. out.write( "\n" )
  762. out.flush()
  763. err.flush()
  764. # We need to get the respond body from the curl and return it.
  765. p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
  766. output = p.communicate()
  767. # In the curl invocation above we could not use -f because
  768. # then the HTTP response would not be output, so use -f in
  769. # an additional invocation so that if there is an HTTP error,
  770. # subprocess.CalledProcessError will be thrown. Note that this
  771. # uses check_output() instead of check_call() so that we can
  772. # ignore the HTTP response because we already output that in
  773. # the first curl invocation.
  774. subprocess.check_output(["curl", "-fsS", url], stderr=err)
  775. err.flush()
  776. if output:
  777. # We have the response body - return it
  778. return output[0]
  779. ##############################################################
  780. # End __curl_url
  781. ##############################################################
  782. ##########################################################################################
  783. # Constructor
  784. ##########################################################################################
  785. def __init__(self, name, directory, benchmarker, runTests, args):
  786. self.name = name
  787. self.directory = directory
  788. self.benchmarker = benchmarker
  789. self.runTests = runTests
  790. self.__dict__.update(args)
  791. # ensure directory has __init__.py file so that we can use it as a Python package
  792. if not os.path.exists(os.path.join(directory, "__init__.py")):
  793. open(os.path.join(directory, "__init__.py"), 'w').close()
  794. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  795. ############################################################
  796. # End __init__
  797. ############################################################
  798. ############################################################
  799. # End FrameworkTest
  800. ############################################################
  801. ##########################################################################################
  802. # Static methods
  803. ##########################################################################################
  804. ##############################################################
  805. # parse_config(config, directory, benchmarker)
  806. # parses a config file and returns a list of FrameworkTest
  807. # objects based on that config file.
  808. ##############################################################
  809. def parse_config(config, directory, benchmarker):
  810. tests = []
  811. # The config object can specify multiple tests, we neep to loop
  812. # over them and parse them out
  813. for test in config['tests']:
  814. for key, value in test.iteritems():
  815. test_name = config['framework']
  816. runTests = dict()
  817. runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
  818. runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
  819. runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
  820. runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
  821. runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
  822. runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)
  823. # if the test uses the 'defualt' keywork, then we don't
  824. # append anything to it's name. All configs should only have 1 default
  825. if key != 'default':
  826. # we need to use the key in the test_name
  827. test_name = test_name + "-" + key
  828. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  829. return tests
  830. ##############################################################
  831. # End parse_config
  832. ##############################################################