# framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json
import textwrap

############################################################
# Test Variables
############################################################
JSON = "json"
DB = "db"
QUERY = "query"
FORTUNE = "fortune"
UPDATE = "update"
PLAINTEXT = "plaintext"

class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
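
  # For example, __get_request_headers(accept_json) renders headers_template to
  # roughly:
  #   -H 'Host: localhost' -H 'Accept: application/json,...' -H 'Connection: keep-alive'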

  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      sleep 2
    done
    """
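
  # A single iteration of the loop above expands to a wrk invocation roughly
  # like the following (the actual duration, concurrency, host and port come
  # from the benchmarker configuration; these values are illustrative):
  #   wrk -H 'Host: localhost' ... -d 60 -c 256 -t 8 "http://server-host:8080/json"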

  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
    """
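
  # Unlike concurrency_template, the query count is appended directly to {url},
  # so a query_url such as "/queries?queries=" (illustrative) is benchmarked as
  # ".../queries?queries=2" for the primer and warmup, and then once for each
  # value in {interval}.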

  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  # Expose the module-level test-type constants as class attributes so the
  # rest of the class can refer to them as self.JSON, self.DB, etc.
  JSON = JSON
  DB = DB
  QUERY = QUERY
  FORTUNE = FORTUNE
  UPDATE = UPDATE
  PLAINTEXT = PLAINTEXT

  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates the jsonString is a JSON object with a 'message'
  # key with the value "hello, world!" (case-insensitive).
  ############################################################
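  # A passing response body looks like (case-insensitive match on "message"):
  #   {"message": "Hello, World!"}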
  def validateJson(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      if obj["message"].lower() == "hello, world!":
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateJson raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
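  # A passing response body looks like (values are illustrative):
  #   {"id": 3217, "randomNumber": 2149}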
  def validateDb(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      # This will error out if the value could not be parsed to a
      # float (this will work with ints, but it will turn them
      # into their float equivalent; i.e. "123" => 123.0)
      if type(float(obj["id"])) == float and type(float(obj["randomNumber"])) == float:
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateDb raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
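  # A passing response body looks like (values are illustrative):
  #   [{"id": 4174, "randomNumber": 331}, {"id": 51, "randomNumber": 6227}]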
  def validateQuery(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomNumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomNumber"])) == float):
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateQuery raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune return.
  ############################################################
  def validateFortune(self, htmlString, out, err):
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      return parser.isValidFortune()
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateFortune raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString, out, err):
    try:
      arr = json.loads(jsonString)
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomNumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomNumber"])) == float):
        return True
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validateUpdate raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # Validates the response body is the plaintext string
  # "hello, world!" (case-insensitive, ignoring surrounding
  # whitespace).
  ############################################################
  def validatePlaintext(self, jsonString, out, err):
    try:
      return jsonString.lower().strip() == "hello, world!"
    except:
      err.write(textwrap.dedent("""
        -----------------------------------------------------
          Error: validatePlaintext raised exception
        -----------------------------------------------------
        {trace}
        """.format(trace=sys.exc_info()[:2])))
    return False

  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################

  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL,
  # a flag is set on this object indicating whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.json_url, self.port)
        output = self.__curl_url(url, self.JSON, out, err)
        if self.validateJson(output, out, err):
          self.json_url_passed = True
        else:
          self.json_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.json_url_passed = False
      out.write("VALIDATING JSON ... ")
      if self.json_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # DB
    if self.runTests[self.DB]:
      out.write("VERIFYING DB (" + self.db_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.db_url, self.port)
        output = self.__curl_url(url, self.DB, out, err)
        if self.validateDb(output, out, err):
          self.db_url_passed = True
        else:
          self.db_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.db_url_passed = False
      out.write("VALIDATING DB ... ")
      if self.db_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Query
    if self.runTests[self.QUERY]:
      out.write("VERIFYING QUERY (" + self.query_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.query_url + "2", self.port)
        output = self.__curl_url(url, self.QUERY, out, err)
        if self.validateQuery(output, out, err):
          self.query_url_passed = True
        else:
          self.query_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.query_url_passed = False
      out.write("VALIDATING QUERY ... ")
      if self.query_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Fortune
    if self.runTests[self.FORTUNE]:
      out.write("VERIFYING FORTUNE (" + self.fortune_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.fortune_url, self.port)
        output = self.__curl_url(url, self.FORTUNE, out, err)
        if self.validateFortune(output, out, err):
          self.fortune_url_passed = True
        else:
          self.fortune_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.fortune_url_passed = False
      out.write("VALIDATING FORTUNE ... ")
      if self.fortune_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Update
    if self.runTests[self.UPDATE]:
      out.write("VERIFYING UPDATE (" + self.update_url + "2) ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.update_url + "2", self.port)
        output = self.__curl_url(url, self.UPDATE, out, err)
        if self.validateUpdate(output, out, err):
          self.update_url_passed = True
        else:
          self.update_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.update_url_passed = False
      out.write("VALIDATING UPDATE ... ")
      if self.update_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      out.write("VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n")
      out.flush()
      try:
        url = self.benchmarker.generate_url(self.plaintext_url, self.port)
        output = self.__curl_url(url, self.PLAINTEXT, out, err)
        if self.validatePlaintext(output, out, err):
          self.plaintext_url_passed = True
        else:
          self.plaintext_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.plaintext_url_passed = False
      out.write("VALIDATING PLAINTEXT ... ")
      if self.plaintext_url_passed:
        out.write("PASS\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()
  ############################################################
  # End verify_urls
  ############################################################

  ############################################################
  # contains_type(type)
  # true if this test contains an implementation of the given
  # test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url != None:
        return True
      if type == self.DB and self.db_url != None:
        return True
      if type == self.QUERY and self.query_url != None:
        return True
      if type == self.FORTUNE and self.fortune_url != None:
        return True
      if type == self.UPDATE and self.update_url != None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url != None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################

  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        out.write("BENCHMARKING JSON ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.JSON)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.JSON)
        self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        out.write("BENCHMARKING DB ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.DB)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.db_url_passed:
          remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.DB)
        self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
        out.write("Complete\n")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        out.write("BENCHMARKING Query ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.QUERY)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.query_url_passed:
          remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        out.write("BENCHMARKING Fortune ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.fortune_url_passed:
          remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        out.write("BENCHMARKING Update ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.UPDATE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.update_url_passed:
          remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        out.write("BENCHMARKING Plaintext ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.plaintext_url_passed:
          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384], pipeline="--pipeline 16")
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        traceback.print_exc()
        pass
  ############################################################
  # End benchmark
  ############################################################

  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################

  ############################################################
  # __parse_test(test_type)
  ############################################################
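  # The raw file is the captured stdout of the wrk runs generated above; the
  # loop below scans it for lines of roughly this shape (illustrative):
  #     Latency   631.45us    2.02ms   21.38ms   92.31%
  #   1203421 requests in 15.00s, 193.05MB read
  #   Socket errors: connect 0, read 12, write 0, timeout 4
  #   Non-2xx or 3xx responses: 17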
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []

      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue

            if not is_warmup:
              if rawData == None:
                rawData = dict()
                results['results'].append(rawData)

              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)

              # search for weighttp data such as succeeded and failed.
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]

              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]

              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m != None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m != None:
                  rawData['5xx'] = int(m.group(1))

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################

  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the script, which is a bash
  # template that uses wrk to run the test. All the results are
  # written to the output_file.
  ############################################################
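  # The script is piped over stdin to the client machine via the benchmarker's
  # SSH command; for example, a client_ssh_string along the lines of
  # "ssh -T -o StrictHostKeyChecking=no user@client-host" (illustrative).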
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################

  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################

  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################

  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # Use -i to output response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use --stderr - to redirect stderr to stdout so we get
    # error output for sure in stdout.
    # Use -sS to hide progress bar, but show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    # We need to get the response body from the curl and return it.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.__dict__.update(args)

    # ensure directory has __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()

    self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################

############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
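# An example of the config structure this function can parse (keys inferred
# from the lookups below; values are illustrative only):
#   {
#     "framework": "myframework",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "db_url": "/db",
#         "query_url": "/queries?queries=",
#         "port": 8080
#       }
#     }]
#   }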
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests, we need to loop
  # over them and parse them out
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']
      runTests = dict()

      if (benchmarker.type == "all" or benchmarker.type == JSON) and value.get("json_url", False):
        runTests["json"] = True
      if (benchmarker.type == "all" or benchmarker.type == DB) and value.get("db_url", False):
        runTests["db"] = True
      if (benchmarker.type == "all" or benchmarker.type == QUERY) and value.get("query_url", False):
        runTests["query"] = True
      if (benchmarker.type == "all" or benchmarker.type == FORTUNE) and value.get("fortune_url", False):
        runTests["fortune"] = True
      if (benchmarker.type == "all" or benchmarker.type == UPDATE) and value.get("update_url", False):
        runTests["update"] = True
      if (benchmarker.type == "all" or benchmarker.type == PLAINTEXT) and value.get("plaintext_url", False):
        runTests["plaintext"] = True

      # if the test uses the 'default' keyword, then we don't
      # append anything to its name. All configs should only have 1 default
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))

  return tests
##############################################################
# End parse_config
##############################################################