framework_test.py 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. import importlib
  3. import os
  4. import subprocess
  5. import time
  6. import re
  7. import pprint
  8. import sys
  9. import traceback
  10. import json
  11. import textwrap
  12. ############################################################
  13. # Test Variables
  14. ############################################################
  15. JSON = "json"
  16. DB = "db"
  17. QUERY = "query"
  18. FORTUNE = "fortune"
  19. UPDATE = "update"
  20. PLAINTEXT = "plaintext"
  21. class FrameworkTest:
  22. ##########################################################################################
  23. # Class variables
  24. ##########################################################################################
  25. headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  26. headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
  27. accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  28. accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  29. accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  30. concurrency_template = """
  31. echo ""
  32. echo "---------------------------------------------------------"
  33. echo " Running Primer {name}"
  34. echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
  35. echo "---------------------------------------------------------"
  36. echo ""
  37. {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
  38. sleep 5
  39. echo ""
  40. echo "---------------------------------------------------------"
  41. echo " Running Warmup {name}"
  42. echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
  43. echo "---------------------------------------------------------"
  44. echo ""
  45. {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
  46. sleep 5
  47. for c in {interval}
  48. do
  49. echo ""
  50. echo "---------------------------------------------------------"
  51. echo " Concurrency: $c for {name}"
  52. echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
  53. echo "---------------------------------------------------------"
  54. echo ""
  55. {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
  56. sleep 2
  57. done
  58. """
  59. query_template = """
  60. echo ""
  61. echo "---------------------------------------------------------"
  62. echo " Running Primer {name}"
  63. echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
  64. echo "---------------------------------------------------------"
  65. echo ""
  66. wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
  67. sleep 5
  68. echo ""
  69. echo "---------------------------------------------------------"
  70. echo " Running Warmup {name}"
  71. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
  72. echo "---------------------------------------------------------"
  73. echo ""
  74. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
  75. sleep 5
  76. for c in {interval}
  77. do
  78. echo ""
  79. echo "---------------------------------------------------------"
  80. echo " Queries: $c for {name}"
  81. echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
  82. echo "---------------------------------------------------------"
  83. echo ""
  84. wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
  85. sleep 2
  86. done
  87. """
  88. language = None
  89. platform = None
  90. webserver = None
  91. classification = None
  92. database = None
  93. approach = None
  94. orm = None
  95. framework = None
  96. os = None
  97. database_os = None
  98. display_name = None
  99. notes = None
  100. versus = None
  101. ##########################################################################################
  102. # Public Methods
  103. ##########################################################################################
  104. ############################################################
  105. # Validates the jsonString is a JSON object with a 'message'
  106. # key with the value "hello, world!" (case-insensitive).
  107. ############################################################
  108. def validateJson(self, jsonString, out, err):
  109. try:
  110. obj = json.loads(jsonString)
  111. if obj["message"].lower() == "hello, world!":
  112. return True
  113. except:
  114. err.write(textwrap.dedent("""
  115. -----------------------------------------------------
  116. Error: validateJson raised exception
  117. -----------------------------------------------------
  118. {trace}
  119. """.format( trace=sys.exc_info()[:2])))
  120. return False
  121. ############################################################
  122. # Validates the jsonString is a JSON object that has an "id"
  123. # and a "randomNumber" key, and that both keys map to
  124. # integers.
  125. ############################################################
  126. def validateDb(self, jsonString, out, err):
  127. try:
  128. obj = json.loads(jsonString)
  129. # This will error out of the value could not parsed to a
  130. # float (this will work with ints, but it will turn them
  131. # into their float equivalent; i.e. "123" => 123.0)
  132. if type(float(obj["id"])) == float and type(float(obj["randomNumber"])) == float:
  133. return True
  134. except:
  135. err.write(textwrap.dedent("""
  136. -----------------------------------------------------
  137. Error: validateDb raised exception
  138. -----------------------------------------------------
  139. {trace}
  140. """.format( trace=sys.exc_info()[:2])))
  141. return False
  142. ############################################################
  143. # Validates the jsonString is an array with a length of
  144. # 2, that each entry in the array is a JSON object, that
  145. # each object has an "id" and a "randomNumber" key, and that
  146. # both keys map to integers.
  147. ############################################################
  148. def validateQuery(self, jsonString, out, err):
  149. try:
  150. arr = json.loads(jsonString)
  151. if type(float(arr[0]["id"])) == float and type(float(arr[0]["randomNumber"])) == float and type(float(arr[1]["id"])) == float and type(float(arr[1]["randomNumber"])) == float:
  152. return True
  153. except:
  154. err.write(textwrap.dedent("""
  155. -----------------------------------------------------
  156. Error: validateQuery raised exception
  157. -----------------------------------------------------
  158. {trace}
  159. """.format( trace=sys.exc_info()[:2])))
  160. return False
  161. ############################################################
  162. # Parses the given HTML string and asks a FortuneHTMLParser
  163. # whether the parsed string is a valid fortune return.
  164. ############################################################
  165. def validateFortune(self, htmlString, out, err):
  166. try:
  167. parser = FortuneHTMLParser()
  168. parser.feed(htmlString)
  169. return parser.isValidFortune()
  170. except:
  171. err.write(textwrap.dedent("""
  172. -----------------------------------------------------
  173. Error: validateFortune raised exception
  174. -----------------------------------------------------
  175. {trace}
  176. """.format( trace=sys.exc_info()[:2])))
  177. return False
  178. ############################################################
  179. # Validates the jsonString is an array with a length of
  180. # 2, that each entry in the array is a JSON object, that
  181. # each object has an "id" and a "randomNumber" key, and that
  182. # both keys map to integers.
  183. ############################################################
  184. def validateUpdate(self, jsonString, out, err):
  185. try:
  186. arr = json.loads(jsonString)
  187. if type(float(arr[0]["id"])) == float and type(float(arr[0]["randomNumber"])) == float and type(float(arr[1]["id"])) == float and type(float(arr[1]["randomNumber"])) == float:
  188. return True
  189. except:
  190. err.write(textwrap.dedent("""
  191. -----------------------------------------------------
  192. Error: validateUpdate raised exception
  193. -----------------------------------------------------
  194. {trace}
  195. """.format( trace=sys.exc_info()[:2])))
  196. return False
  197. ############################################################
  198. #
  199. ############################################################
  200. def validatePlaintext(self, jsonString, out, err):
  201. try:
  202. return jsonString.lower().strip() == "hello, world!"
  203. except:
  204. err.write(textwrap.dedent("""
  205. -----------------------------------------------------
  206. Error: validatePlaintext raised exception
  207. -----------------------------------------------------
  208. {trace}
  209. """.format( trace=sys.exc_info()[:2])))
  210. return False
  211. ############################################################
  212. # start(benchmarker)
  213. # Start the test using it's setup file
  214. ############################################################
  215. def start(self, out, err):
  216. return self.setup_module.start(self.benchmarker, out, err)
  217. ############################################################
  218. # End start
  219. ############################################################
  220. ############################################################
  221. # stop(benchmarker)
  222. # Stops the test using it's setup file
  223. ############################################################
  224. def stop(self, out, err):
  225. return self.setup_module.stop(out, err)
  226. ############################################################
  227. # End stop
  228. ############################################################
  229. ############################################################
  230. # verify_urls
  231. # Verifys each of the URLs for this test. THis will sinply
  232. # curl the URL and check for it's return status.
  233. # For each url, a flag will be set on this object for whether
  234. # or not it passed
  235. ############################################################
  236. def verify_urls(self, out, err):
  237. # JSON
  238. if self.runTests[JSON]:
  239. out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
  240. out.flush()
  241. try:
  242. url = self.benchmarker.generate_url(self.json_url, self.port)
  243. output = self.__curl_url(url, JSON, out, err)
  244. if self.validateJson(output, out, err):
  245. self.json_url_passed = True
  246. else:
  247. self.json_url_passed = False
  248. except (AttributeError, subprocess.CalledProcessError) as e:
  249. self.json_url_passed = False
  250. out.write("VALIDATING JSON ... ")
  251. if self.json_url_passed:
  252. out.write("PASS\n\n")
  253. else:
  254. out.write("FAIL\n\n")
  255. out.flush
  256. # DB
  257. if self.runTests[DB]:
  258. out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
  259. out.flush()
  260. try:
  261. url = self.benchmarker.generate_url(self.db_url, self.port)
  262. output = self.__curl_url(url, DB, out, err)
  263. if self.validateDb(output, out, err):
  264. self.db_url_passed = True
  265. else:
  266. self.db_url_passed = False
  267. except (AttributeError, subprocess.CalledProcessError) as e:
  268. self.db_url_passed = False
  269. out.write("VALIDATING DB ... ")
  270. if self.db_url_passed:
  271. out.write("PASS\n\n")
  272. else:
  273. out.write("FAIL\n\n")
  274. out.flush
  275. # Query
  276. if self.runTests[QUERY]:
  277. out.write( "VERIFYING QUERY (" + self.query_url + "2) ...\n" )
  278. out.flush()
  279. try:
  280. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  281. output = self.__curl_url(url, QUERY, out, err)
  282. if self.validateQuery(output, out, err):
  283. self.query_url_passed = True
  284. else:
  285. self.query_url_passed = False
  286. except (AttributeError, subprocess.CalledProcessError) as e:
  287. self.query_url_passed = False
  288. out.write("VALIDATING QUERY ... ")
  289. if self.query_url_passed:
  290. out.write("PASS\n\n")
  291. else:
  292. out.write("FAIL\n\n")
  293. out.flush
  294. # Fortune
  295. if self.runTests[FORTUNE]:
  296. out.write( "VERIFYING FORTUNE (" + self.fortune_url + ") ...\n" )
  297. out.flush()
  298. try:
  299. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  300. output = self.__curl_url(url, FORTUNE, out, err)
  301. if self.validateFortune(output, out, err):
  302. self.fortune_url_passed = True
  303. else:
  304. self.fortune_url_passed = False
  305. except (AttributeError, subprocess.CalledProcessError) as e:
  306. self.fortune_url_passed = False
  307. out.write("VALIDATING FORTUNE ... ")
  308. if self.fortune_url_passed:
  309. out.write("PASS\n\n")
  310. else:
  311. out.write("FAIL\n\n")
  312. out.flush
  313. # Update
  314. if self.runTests[UPDATE]:
  315. out.write( "VERIFYING UPDATE (" + self.update_url + "2) ...\n" )
  316. out.flush()
  317. try:
  318. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  319. output = self.__curl_url(url, UPDATE, out, err)
  320. if self.validateUpdate(output, out, err):
  321. self.update_url_passed = True
  322. else:
  323. self.update_url_passed = False
  324. except (AttributeError, subprocess.CalledProcessError) as e:
  325. self.update_url_passed = False
  326. out.write("VALIDATING UPDATE ... ")
  327. if self.update_url_passed:
  328. out.write("PASS\n\n")
  329. else:
  330. out.write("FAIL\n\n")
  331. out.flush
  332. # plaintext
  333. if self.runTests[PLAINTEXT]:
  334. out.write( "VERIFYING PLAINTEXT (" + self.plaintext_url + ") ...\n" )
  335. out.flush()
  336. try:
  337. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  338. output = self.__curl_url(url, PLAINTEXT, out, err)
  339. if self.validatePlaintext(output, out, err):
  340. self.plaintext_url_passed = True
  341. else:
  342. self.plaintext_url_passed = False
  343. except (AttributeError, subprocess.CalledProcessError) as e:
  344. self.plaintext_url_passed = False
  345. out.write("VALIDATING PLAINTEXT ... ")
  346. if self.plaintext_url_passed:
  347. out.write("PASS\n\n")
  348. else:
  349. out.write("FAIL\n\n")
  350. out.flush
  351. ############################################################
  352. # End verify_urls
  353. ############################################################
  354. ############################################################
  355. # contains_type(type)
  356. # true if this test contains an implementation of the given
  357. # test type (json, db, etc.)
  358. ############################################################
  359. def contains_type(self, type):
  360. try:
  361. if type == JSON and self.json_url != None:
  362. return True
  363. if type == DB and self.db_url != None:
  364. return True
  365. if type == QUERY and self.query_url != None:
  366. return True
  367. if type == FORTUNE and self.fortune_url != None:
  368. return True
  369. if type == UPDATE and self.update_url != None:
  370. return True
  371. if type == PLAINTEXT and self.plaintext_url != None:
  372. return True
  373. except AttributeError:
  374. pass
  375. return False
  376. ############################################################
  377. # End stop
  378. ############################################################
  379. ############################################################
  380. # benchmark
  381. # Runs the benchmark for each type of test that it implements
  382. # JSON/DB/Query.
  383. ############################################################
  384. def benchmark(self, out, err):
  385. # JSON
  386. if self.runTests[JSON]:
  387. try:
  388. out.write("BENCHMARKING JSON ... ")
  389. out.flush()
  390. results = None
  391. output_file = self.benchmarker.output_file(self.name, JSON)
  392. if not os.path.exists(output_file):
  393. with open(output_file, 'w'):
  394. # Simply opening the file in write mode should create the empty file.
  395. pass
  396. if self.json_url_passed:
  397. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  398. self.__run_benchmark(remote_script, output_file, err)
  399. results = self.__parse_test(JSON)
  400. self.benchmarker.report_results(framework=self, test=JSON, results=results['results'])
  401. out.write( "Complete\n" )
  402. out.flush()
  403. except AttributeError:
  404. pass
  405. # DB
  406. if self.runTests[DB]:
  407. try:
  408. out.write("BENCHMARKING DB ... ")
  409. out.flush()
  410. results = None
  411. output_file = self.benchmarker.output_file(self.name, DB)
  412. if not os.path.exists(output_file):
  413. with open(output_file, 'w'):
  414. # Simply opening the file in write mode should create the empty file.
  415. pass
  416. if self.db_url_passed:
  417. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  418. self.__run_benchmark(remote_script, output_file, err)
  419. results = self.__parse_test(DB)
  420. self.benchmarker.report_results(framework=self, test=DB, results=results['results'])
  421. out.write( "Complete\n" )
  422. except AttributeError:
  423. pass
  424. # Query
  425. if self.runTests[QUERY]:
  426. try:
  427. out.write("BENCHMARKING Query ... ")
  428. out.flush()
  429. results = None
  430. output_file = self.benchmarker.output_file(self.name, QUERY)
  431. if not os.path.exists(output_file):
  432. with open(output_file, 'w'):
  433. # Simply opening the file in write mode should create the empty file.
  434. pass
  435. if self.query_url_passed:
  436. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  437. self.__run_benchmark(remote_script, output_file, err)
  438. results = self.__parse_test(QUERY)
  439. self.benchmarker.report_results(framework=self, test=QUERY, results=results['results'])
  440. out.write( "Complete\n" )
  441. out.flush()
  442. except AttributeError:
  443. pass
  444. # fortune
  445. if self.runTests[FORTUNE]:
  446. try:
  447. out.write("BENCHMARKING Fortune ... ")
  448. out.flush()
  449. results = None
  450. output_file = self.benchmarker.output_file(self.name, FORTUNE)
  451. if not os.path.exists(output_file):
  452. with open(output_file, 'w'):
  453. # Simply opening the file in write mode should create the empty file.
  454. pass
  455. if self.fortune_url_passed:
  456. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  457. self.__run_benchmark(remote_script, output_file, err)
  458. results = self.__parse_test(FORTUNE)
  459. self.benchmarker.report_results(framework=self, test=FORTUNE, results=results['results'])
  460. out.write( "Complete\n" )
  461. out.flush()
  462. except AttributeError:
  463. pass
  464. # update
  465. if self.runTests[UPDATE]:
  466. try:
  467. out.write("BENCHMARKING Update ... ")
  468. out.flush()
  469. results = None
  470. output_file = self.benchmarker.output_file(self.name, UPDATE)
  471. if not os.path.exists(output_file):
  472. with open(output_file, 'w'):
  473. # Simply opening the file in write mode should create the empty file.
  474. pass
  475. if self.update_url_passed:
  476. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  477. self.__run_benchmark(remote_script, output_file, err)
  478. results = self.__parse_test(UPDATE)
  479. self.benchmarker.report_results(framework=self, test=UPDATE, results=results['results'])
  480. out.write( "Complete\n" )
  481. out.flush()
  482. except AttributeError:
  483. pass
  484. # plaintext
  485. if self.runTests[PLAINTEXT]:
  486. try:
  487. out.write("BENCHMARKING Plaintext ... ")
  488. out.flush()
  489. results = None
  490. output_file = self.benchmarker.output_file(self.name, PLAINTEXT)
  491. if not os.path.exists(output_file):
  492. with open(output_file, 'w'):
  493. # Simply opening the file in write mode should create the empty file.
  494. pass
  495. if self.plaintext_url_passed:
  496. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
  497. self.__run_benchmark(remote_script, output_file, err)
  498. results = self.__parse_test(PLAINTEXT)
  499. self.benchmarker.report_results(framework=self, test=PLAINTEXT, results=results['results'])
  500. out.write( "Complete\n" )
  501. out.flush()
  502. except AttributeError:
  503. traceback.print_exc()
  504. pass
  505. ############################################################
  506. # End benchmark
  507. ############################################################
  508. ############################################################
  509. # parse_all
  510. # Method meant to be run for a given timestamp
  511. ############################################################
  512. def parse_all(self):
  513. # JSON
  514. if os.path.exists(self.benchmarker.get_output_file(self.name, JSON)):
  515. results = self.__parse_test(JSON)
  516. self.benchmarker.report_results(framework=self, test=JSON, results=results['results'])
  517. # DB
  518. if os.path.exists(self.benchmarker.get_output_file(self.name, DB)):
  519. results = self.__parse_test(DB)
  520. self.benchmarker.report_results(framework=self, test=DB, results=results['results'])
  521. # Query
  522. if os.path.exists(self.benchmarker.get_output_file(self.name, QUERY)):
  523. results = self.__parse_test(QUERY)
  524. self.benchmarker.report_results(framework=self, test=QUERY, results=results['results'])
  525. # Fortune
  526. if os.path.exists(self.benchmarker.get_output_file(self.name, FORTUNE)):
  527. results = self.__parse_test(FORTUNE)
  528. self.benchmarker.report_results(framework=self, test=FORTUNE, results=results['results'])
  529. # Update
  530. if os.path.exists(self.benchmarker.get_output_file(self.name, UPDATE)):
  531. results = self.__parse_test(UPDATE)
  532. self.benchmarker.report_results(framework=self, test=UPDATE, results=results['results'])
  533. # Plaintext
  534. if os.path.exists(self.benchmarker.get_output_file(self.name, PLAINTEXT)):
  535. results = self.__parse_test(PLAINTEXT)
  536. self.benchmarker.report_results(framework=self, test=PLAINTEXT, results=results['results'])
  537. ############################################################
  538. # End parse_all
  539. ############################################################
  540. ############################################################
  541. # __parse_test(test_type)
  542. ############################################################
  543. def __parse_test(self, test_type):
  544. try:
  545. results = dict()
  546. results['results'] = []
  547. if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
  548. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  549. is_warmup = True
  550. rawData = None
  551. for line in raw_data:
  552. if "Queries:" in line or "Concurrency:" in line:
  553. is_warmup = False
  554. rawData = None
  555. continue
  556. if "Warmup" in line or "Primer" in line:
  557. is_warmup = True
  558. continue
  559. if not is_warmup:
  560. if rawData == None:
  561. rawData = dict()
  562. results['results'].append(rawData)
  563. #if "Requests/sec:" in line:
  564. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  565. # rawData['reportedResults'] = m.group(1)
  566. # search for weighttp data such as succeeded and failed.
  567. if "Latency" in line:
  568. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  569. if len(m) == 4:
  570. rawData['latencyAvg'] = m[0]
  571. rawData['latencyStdev'] = m[1]
  572. rawData['latencyMax'] = m[2]
  573. # rawData['latencyStdevPercent'] = m[3]
  574. #if "Req/Sec" in line:
  575. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  576. # if len(m) == 4:
  577. # rawData['requestsAvg'] = m[0]
  578. # rawData['requestsStdev'] = m[1]
  579. # rawData['requestsMax'] = m[2]
  580. # rawData['requestsStdevPercent'] = m[3]
  581. #if "requests in" in line:
  582. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  583. # if m != None:
  584. # # parse out the raw time, which may be in minutes or seconds
  585. # raw_time = m.group(1)
  586. # if "ms" in raw_time:
  587. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  588. # elif "s" in raw_time:
  589. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  590. # elif "m" in raw_time:
  591. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  592. # elif "h" in raw_time:
  593. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  594. if "requests in" in line:
  595. m = re.search("([0-9]+) requests in", line)
  596. if m != None:
  597. rawData['totalRequests'] = int(m.group(1))
  598. if "Socket errors" in line:
  599. if "connect" in line:
  600. m = re.search("connect ([0-9]+)", line)
  601. rawData['connect'] = int(m.group(1))
  602. if "read" in line:
  603. m = re.search("read ([0-9]+)", line)
  604. rawData['read'] = int(m.group(1))
  605. if "write" in line:
  606. m = re.search("write ([0-9]+)", line)
  607. rawData['write'] = int(m.group(1))
  608. if "timeout" in line:
  609. m = re.search("timeout ([0-9]+)", line)
  610. rawData['timeout'] = int(m.group(1))
  611. if "Non-2xx" in line:
  612. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  613. if m != None:
  614. rawData['5xx'] = int(m.group(1))
  615. return results
  616. except IOError:
  617. return None
  618. ############################################################
  619. # End benchmark
  620. ############################################################
  621. ##########################################################################################
  622. # Private Methods
  623. ##########################################################################################
  624. ############################################################
  625. # __run_benchmark(script, output_file)
  626. # Runs a single benchmark using the script which is a bash
  627. # template that uses weighttp to run the test. All the results
  628. # outputed to the output_file.
  629. ############################################################
  630. def __run_benchmark(self, script, output_file, err):
  631. with open(output_file, 'w') as raw_file:
  632. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  633. p.communicate(script)
  634. err.flush()
  635. ############################################################
  636. # End __run_benchmark
  637. ############################################################
  638. ############################################################
  639. # __generate_concurrency_script(url, port)
  640. # Generates the string containing the bash script that will
  641. # be run on the client to benchmark a single test. This
  642. # specifically works for the variable concurrency tests (JSON
  643. # and DB)
  644. ############################################################
  645. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  646. if len(intervals) == 0:
  647. intervals = self.benchmarker.concurrency_levels
  648. headers = self.__get_request_headers(accept_header)
  649. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  650. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  651. interval=" ".join("{}".format(item) for item in intervals),
  652. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  653. pipeline=pipeline)
  654. ############################################################
  655. # End __generate_concurrency_script
  656. ############################################################
  657. ############################################################
  658. # __generate_query_script(url, port)
  659. # Generates the string containing the bash script that will
  660. # be run on the client to benchmark a single test. This
  661. # specifically works for the variable query tests (Query)
  662. ############################################################
  663. def __generate_query_script(self, url, port, accept_header):
  664. headers = self.__get_request_headers(accept_header)
  665. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  666. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  667. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  668. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  669. ############################################################
  670. # End __generate_query_script
  671. ############################################################
  672. ############################################################
  673. # __get_request_headers(accept_header)
  674. # Generates the complete HTTP header string
  675. ############################################################
  676. def __get_request_headers(self, accept_header):
  677. return self.headers_template.format(accept=accept_header)
  678. ############################################################
  679. # End __format_request_headers
  680. ############################################################
############################################################
# __curl_url
# Dump HTTP response and headers. Throw exception if there
# is an HTTP error.
############################################################
def __curl_url(self, url, testType, out, err):
    """Fetch url with curl three times: dump headers+body to `out`,
    capture the body (returned to the caller), and finally verify the
    response was not an HTTP error.

    Raises subprocess.CalledProcessError if the final -f invocation
    sees an HTTP error status. `testType` is not used in this method.
    """
    # First invocation: dump everything into the verification log.
    # Use -i to output response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use --stderr - to redirect stderr to stdout so we get
    # error output for sure in stdout.
    # Use -sS to hide progress bar, but show errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write( "\n" )
    out.flush()
    # Second invocation: we need to get the response body from curl
    # and return it (body only, no headers this time).
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write( "\n" )
    out.flush()
    # NOTE(review): communicate() returns a (stdout, stderr) tuple, which
    # is always truthy, so this guard always passes; presumably the intent
    # was to guard against a missing body — confirm before changing.
    if output:
        # We have the response body - return it
        return output[0]
##############################################################
# End __curl_url
##############################################################
  719. ##########################################################################################
  720. # Constructor
  721. ##########################################################################################
  722. def __init__(self, name, directory, benchmarker, runTests, args):
  723. self.name = name
  724. self.directory = directory
  725. self.benchmarker = benchmarker
  726. self.runTests = runTests
  727. self.__dict__.update(args)
  728. # ensure directory has __init__.py file so that we can use it as a Python package
  729. if not os.path.exists(os.path.join(directory, "__init__.py")):
  730. open(os.path.join(directory, "__init__.py"), 'w').close()
  731. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  732. ############################################################
  733. # End __init__
  734. ############################################################
  735. ############################################################
  736. # End FrameworkTest
  737. ############################################################
  738. ##########################################################################################
  739. # Static methods
  740. ##########################################################################################
  741. ##############################################################
  742. # parse_config(config, directory, benchmarker)
  743. # parses a config file and returns a list of FrameworkTest
  744. # objects based on that config file.
  745. ##############################################################
  746. def parse_config(config, directory, benchmarker):
  747. tests = []
  748. # The config object can specify multiple tests, we neep to loop
  749. # over them and parse them out
  750. for test in config['tests']:
  751. for key, value in test.iteritems():
  752. test_name = config['framework']
  753. runTests = dict()
  754. if (benchmarker.type == "all" or benchmarker.type == JSON) and value.get("json_url", False):
  755. runTests["json"] = True
  756. if (benchmarker.type == "all" or benchmarker.type == DB) and value.get("db_url", False):
  757. runTests["db"] = True
  758. if (benchmarker.type == "all" or benchmarker.type == QUERY) and value.get("query_url", False):
  759. runTests["query"] = True
  760. if (benchmarker.type == "all" or benchmarker.type == FORTUNE) and value.get("fortune_url", False):
  761. runTests["fortune"] = True
  762. if (benchmarker.type == "all" or benchmarker.type == UPDATE) and value.get("update_url", False):
  763. runTests["update"] = True
  764. if (benchmarker.type == "all" or benchmarker.type == PLAINTEXT) and value.get("plaintext_url", False):
  765. runTests["plaintext"] = True
  766. # if the test uses the 'defualt' keywork, then we don't
  767. # append anything to it's name. All configs should only have 1 default
  768. if key != 'default':
  769. # we need to use the key in the test_name
  770. test_name = test_name + "-" + key
  771. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  772. return tests
  773. ##############################################################
  774. # End parse_config
  775. ##############################################################