framework_test.py

import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json

# NOTE: validateFortune below relies on a FortuneHTMLParser class; it is
# assumed to live in a sibling fortune_html_parser module (module name assumed).
from fortune_html_parser import FortuneHTMLParser


class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      sleep 2
    done
    """
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
    """
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"

  ##########################################################################################
  # Public Methods
  ##########################################################################################
  ############################################################
  # Validates that jsonString is a JSON object with a 'message'
  # key mapped to the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString):
    try:
      obj = json.loads(jsonString)
    except ValueError:
      # A body that is not valid JSON fails validation instead of raising.
      return False
    if not obj:
      return False
    # Use .get() so a missing key fails validation instead of raising KeyError.
    if not obj.get("message"):
      return False
    if obj["message"].lower() != "hello, world!":
      return False
    return True
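
  # Illustrative only (not executed): the kind of body validateJson is meant
  # to accept or reject. Both example payloads are made up.
  #
  #   self.validateJson('{"message": "Hello, World!"}')  # -> True
  #   self.validateJson('{"message": "bye"}')            # -> False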
  ############################################################
  # Validates that jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString):
    try:
      obj = json.loads(jsonString)
    except ValueError:
      return False
    if not obj:
      return False
    # Missing keys fail validation instead of raising KeyError.
    if not obj.get("id") or type(obj["id"]) != int:
      return False
    if not obj.get("randomNumber") or type(obj["randomNumber"]) != int:
      return False
    return True
  ############################################################
  # Validates that jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString):
    try:
      arr = json.loads(jsonString)
    except ValueError:
      return False
    if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
      return False
    # Missing keys fail validation instead of raising KeyError.
    if not arr[0].get("id") or type(arr[0]["id"]) != int:
      return False
    if not arr[0].get("randomNumber") or type(arr[0]["randomNumber"]) != int:
      return False
    if not arr[1].get("id") or type(arr[1]["id"]) != int:
      return False
    if not arr[1].get("randomNumber") or type(arr[1]["randomNumber"]) != int:
      return False
    return True
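
  # Illustrative only (not executed): a body of the shape validateQuery
  # expects, i.e. a two-element array of {"id": int, "randomNumber": int}
  # objects. The numbers are made up.
  #
  #   self.validateQuery('[{"id": 4174, "randomNumber": 331}, '
  #                      '{"id": 51, "randomNumber": 6227}]')  # -> True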
  ############################################################
  # Validates that htmlString is a well-formed Fortune HTML
  # response by delegating to FortuneHTMLParser.
  ############################################################
  def validateFortune(self, htmlString):
    fortuneValidator = FortuneHTMLParser(htmlString)
    return fortuneValidator.isValidFortune()
  ############################################################
  # Validates that jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString):
    try:
      arr = json.loads(jsonString)
    except ValueError:
      return False
    if not arr or len(arr) != 2 or type(arr[0]) != dict or type(arr[1]) != dict:
      return False
    # Missing keys fail validation instead of raising KeyError.
    if not arr[0].get("id") or type(arr[0]["id"]) != int:
      return False
    if not arr[0].get("randomNumber") or type(arr[0]["randomNumber"]) != int:
      return False
    if not arr[1].get("id") or type(arr[1]["id"]) != int:
      return False
    if not arr[1].get("randomNumber") or type(arr[1]["randomNumber"]) != int:
      return False
    return True
  ############################################################
  # Validates that the response body is the plaintext string
  # "hello, world!" (case-insensitive, ignoring surrounding
  # whitespace).
  ############################################################
  def validatePlaintext(self, jsonString):
    return jsonString.lower().strip() == "hello, world!"
  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL,
  # a flag is set on this object indicating whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.json_url, self.port)
        output = self.__curl_url(url, self.JSON, out, err)
        if self.validateJson(output):
          self.json_url_passed = True
        else:
          self.json_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.json_url_passed = False

    # DB
    if self.runTests[self.DB]:
      try:
        out.write("VERIFYING DB (" + self.db_url + ") ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.db_url, self.port)
        output = self.__curl_url(url, self.DB, out, err)
        if self.validateDb(output):
          self.db_url_passed = True
        else:
          self.db_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.db_url_passed = False

    # Query
    if self.runTests[self.QUERY]:
      try:
        out.write("VERIFYING Query (" + self.query_url + "2) ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.query_url + "2", self.port)
        output = self.__curl_url(url, self.QUERY, out, err)
        if self.validateQuery(output):
          self.query_url_passed = True
        else:
          self.query_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.query_url_passed = False

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        out.write("VERIFYING Fortune (" + self.fortune_url + ") ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.fortune_url, self.port)
        output = self.__curl_url(url, self.FORTUNE, out, err)
        if self.validateFortune(output):
          self.fortune_url_passed = True
        else:
          self.fortune_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.fortune_url_passed = False

    # Update
    if self.runTests[self.UPDATE]:
      try:
        out.write("VERIFYING Update (" + self.update_url + "2) ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.update_url + "2", self.port)
        output = self.__curl_url(url, self.UPDATE, out, err)
        if self.validateUpdate(output):
          self.update_url_passed = True
        else:
          self.update_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.update_url_passed = False

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        out.write("VERIFYING Plaintext (" + self.plaintext_url + ") ...\n")
        out.flush()
        url = self.benchmarker.generate_url(self.plaintext_url, self.port)
        output = self.__curl_url(url, self.PLAINTEXT, out, err)
        if self.validatePlaintext(output):
          self.plaintext_url_passed = True
        else:
          self.plaintext_url_passed = False
      except (AttributeError, subprocess.CalledProcessError) as e:
        self.plaintext_url_passed = False
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # contains_type(type)
  # Returns True if this test contains an implementation of
  # the given test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url is not None:
        return True
      if type == self.DB and self.db_url is not None:
        return True
      if type == self.QUERY and self.query_url is not None:
        return True
      if type == self.FORTUNE and self.fortune_url is not None:
        return True
      if type == self.UPDATE and self.update_url is not None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url is not None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # (JSON, DB, Query, Fortune, Update, Plaintext).
  ############################################################
  def benchmark(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
          out.write("BENCHMARKING JSON ... ")
          out.flush()
          results = None
          if self.json_url_passed:
            remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
            results = self.__parse_test(self.JSON)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'], passed=self.json_url_passed)
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.DB:
          out.write("BENCHMARKING DB ... ")
          out.flush()
          results = None
          if self.db_url_passed:
            remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
            results = self.__parse_test(self.DB)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'], passed=self.db_url_passed)
          out.write("Complete\n")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY:
          out.write("BENCHMARKING Query ... ")
          out.flush()
          results = None
          if self.query_url_passed:
            remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
            results = self.__parse_test(self.QUERY)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'], passed=self.query_url_passed)
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE:
          out.write("BENCHMARKING Fortune ... ")
          out.flush()
          results = None
          if self.fortune_url_passed:
            remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
            results = self.__parse_test(self.FORTUNE)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'], passed=self.fortune_url_passed)
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE:
          out.write("BENCHMARKING Update ... ")
          out.flush()
          results = None
          if self.update_url_passed:
            remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
            results = self.__parse_test(self.UPDATE)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'], passed=self.update_url_passed)
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        if self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT:
          out.write("BENCHMARKING Plaintext ... ")
          out.flush()
          results = None
          if self.plaintext_url_passed:
            remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384], pipeline="--pipeline 16")
            self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
            results = self.__parse_test(self.PLAINTEXT)
          else:
            results = dict()
            results['results'] = []
          self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'], passed=self.plaintext_url_passed)
          out.write("Complete\n")
          out.flush()
      except AttributeError:
        traceback.print_exc()
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Parses the output file for every test type that has one
  # for this run.
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []

      with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
        is_warmup = True
        rawData = None
        for line in raw_data:

          if "Queries:" in line or "Concurrency:" in line:
            is_warmup = False
            rawData = None
            continue
          if "Warmup" in line or "Primer" in line:
            is_warmup = True
            continue

          if not is_warmup:
            if rawData is None:
              rawData = dict()
              results['results'].append(rawData)

            # if "Requests/sec:" in line:
            #   m = re.search("Requests/sec:\s+([0-9]+)", line)
            #   rawData['reportedResults'] = m.group(1)

            # search for weighttp data such as succeeded and failed.
            if "Latency" in line:
              m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
              if len(m) == 4:
                rawData['latencyAvg'] = m[0]
                rawData['latencyStdev'] = m[1]
                rawData['latencyMax'] = m[2]
                # rawData['latencyStdevPercent'] = m[3]

            # if "Req/Sec" in line:
            #   m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
            #   if len(m) == 4:
            #     rawData['requestsAvg'] = m[0]
            #     rawData['requestsStdev'] = m[1]
            #     rawData['requestsMax'] = m[2]
            #     rawData['requestsStdevPercent'] = m[3]

            # if "requests in" in line:
            #   m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
            #   if m != None:
            #     # parse out the raw time, which may be in minutes or seconds
            #     raw_time = m.group(1)
            #     if "ms" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
            #     elif "s" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1])
            #     elif "m" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
            #     elif "h" in raw_time:
            #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

            if "requests in" in line:
              m = re.search("([0-9]+) requests in", line)
              if m is not None:
                rawData['totalRequests'] = int(m.group(1))

            if "Socket errors" in line:
              if "connect" in line:
                m = re.search("connect ([0-9]+)", line)
                rawData['connect'] = int(m.group(1))
              if "read" in line:
                m = re.search("read ([0-9]+)", line)
                rawData['read'] = int(m.group(1))
              if "write" in line:
                m = re.search("write ([0-9]+)", line)
                rawData['write'] = int(m.group(1))
              if "timeout" in line:
                m = re.search("timeout ([0-9]+)", line)
                rawData['timeout'] = int(m.group(1))

            if "Non-2xx" in line:
              m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
              if m is not None:
                rawData['5xx'] = int(m.group(1))

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
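
  # Illustrative only: given wrk output containing lines such as (made-up
  # numbers)
  #
  #   Concurrency: 64 for example-test
  #     Latency     1.93ms    2.72ms   29.33ms   91.10%
  #   103424 requests in 15.00s, 14.91MB read
  #
  # __parse_test would append
  #   {'latencyAvg': '1.93ms', 'latencyStdev': '2.72ms',
  #    'latencyMax': '29.33ms', 'totalRequests': 103424}
  # to results['results'] for that concurrency level.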
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the script, which is a bash
  # template that uses wrk to run the test. All the results
  # are written to output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
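
  # Illustrative only: if the benchmarker's client_ssh_string were
  # "ssh -T tfb-client" (an assumed value), __run_benchmark amounts to piping
  # the generated bash script into that command with stdout captured in
  # output_file, roughly:
  #
  #   ssh -T tfb-client < generated_script.sh > output_file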
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
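
  # Illustrative only (all values assumed): with concurrency_levels=[8, 16, 32],
  # max_concurrency=32, max_threads=8, duration=15 and server_host="tfb-server",
  #
  #   self.__generate_concurrency_script("/json", 8080, self.accept_json)
  #
  # renders concurrency_template into a bash script that primes and warms up
  # the /json URL, then runs "for c in 8 16 32" with wrk at each level.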
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################
  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################
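
  # Illustrative only: __get_request_headers(self.accept_json) expands
  # headers_template to roughly
  #
  #   -H 'Host: localhost' -H 'Accept: application/json,...' -H 'Connection: keep-alive'
  #
  # (Accept value abbreviated here.)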
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    # Use -i to output the response with headers.
    # Don't use -f so that the HTTP response code is ignored.
    # Use -sS to hide the progress bar, but still show errors
    # (which go to the err stream passed in).
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    # We need to get the response body from curl and return it.
    p = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    output = p.communicate()
    # In the curl invocation above we could not use -f because
    # then the HTTP response would not be output, so use -f in
    # an additional invocation so that if there is an HTTP error,
    # subprocess.CalledProcessError will be thrown. Note that this
    # uses check_output() instead of check_call() so that we can
    # ignore the HTTP response because we already output that in
    # the first curl invocation.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add that here.
    out.write("\n")
    out.flush()
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.__dict__.update(args)

    # ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()

    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################
##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests, so we need to loop
  # over them and parse them out
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']

      runTests = dict()
      runTests["json"] = True if value.get("json_url", False) else False
      runTests["db"] = True if value.get("db_url", False) else False
      runTests["query"] = True if value.get("query_url", False) else False
      runTests["fortune"] = True if value.get("fortune_url", False) else False
      runTests["update"] = True if value.get("update_url", False) else False
      runTests["plaintext"] = True if value.get("plaintext_url", False) else False

      # If the test uses the 'default' keyword, then we don't
      # append anything to its name. All configs should have only one default.
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))

  return tests
##############################################################
# End parse_config
##############################################################
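
# Illustrative only: a minimal sketch of the config structure parse_config
# expects, based on the keys read above (all names and values are assumed).
#
#   example_config = {
#     "framework": "example",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "plaintext_url": "/plaintext",
#         "port": 8080
#       }
#     }]
#   }
#
#   tests = parse_config(example_config, "frameworks/example", benchmarker)
#   # -> one FrameworkTest named "example" with runTests["json"] and
#   #    runTests["plaintext"] set to True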