framework_test.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776
  1. from benchmark import FortuneHTMLParser
  2. import importlib
  3. import os
  4. import subprocess
  5. import time
  6. import re
  7. import pprint
  8. import sys
  9. import traceback
  10. import json
class FrameworkTest:
  """One framework test configuration: starts/stops the framework via its
  setup module, verifies each implemented test URL, runs the wrk-based
  benchmarks against them and parses the raw results."""
  ##########################################################################################
  # Class variables
  ##########################################################################################

  # wrk/curl header fragments; {accept} is filled with one of the accept_*
  # strings below.
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  # Browser-like header set (not referenced elsewhere in this file).
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  # Accept headers for the three response flavors being benchmarked.
  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

  # Bash script template run on the client machine for the variable-
  # concurrency tests: primer run, warmup run, then one wrk run per
  # concurrency level in {interval}.
  concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
sleep 2
done
"""
  # Bash script template for the variable-query tests: primer/warmup hit
  # {url}2, then one wrk run per query count in {interval} appended to {url}.
  query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
sleep 2
done
"""
  # Test metadata defaults; per-test values are merged over these in
  # __init__ via self.__dict__.update(args).
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  # Test type identifiers: used as keys into runTests and as the
  # benchmarker.type selector values.
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates the jsonString is a JSON object with a 'message'
  # key with the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString):
    """Return True iff jsonString is a JSON object whose 'message'
    key equals "hello, world!" (case-insensitive).

    Invalid JSON, a non-object payload or a missing/empty 'message'
    key yields False instead of raising (callers only catch
    AttributeError and CalledProcessError, so a raised ValueError or
    KeyError would abort verification).
    """
    try:
      obj = json.loads(jsonString)
    except (ValueError, TypeError):
      return False
    if not isinstance(obj, dict):
      return False
    message = obj.get("message")
    if not message:
      return False
    return message.lower() == "hello, world!"
  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString):
    """Return True iff jsonString is a JSON object whose 'id' and
    'randomNumber' keys both map to non-zero integers.

    Invalid JSON, a non-object payload or missing keys yield False
    instead of raising (ValueError/KeyError are not caught by callers).
    """
    try:
      obj = json.loads(jsonString)
    except (ValueError, TypeError):
      return False
    if not isinstance(obj, dict):
      return False
    for key in ("id", "randomNumber"):
      value = obj.get(key)
      if not value or not isinstance(value, int):
        return False
    return True
  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString):
    """Return True iff jsonString is a JSON array of exactly two
    objects, each with non-zero integer 'id' and 'randomNumber' keys.

    Invalid JSON, a non-array payload or missing keys yield False
    instead of raising (ValueError/KeyError are not caught by callers).
    """
    try:
      arr = json.loads(jsonString)
    except (ValueError, TypeError):
      return False
    if not isinstance(arr, list) or len(arr) != 2:
      return False
    for entry in arr:
      if not isinstance(entry, dict):
        return False
      for key in ("id", "randomNumber"):
        value = entry.get(key)
        if not value or not isinstance(value, int):
          return False
    return True
  ############################################################
  # Validates the fortune HTML response by feeding it through
  # FortuneHTMLParser and asking it whether the document matched
  # the expected fortune table.
  ############################################################
  def validateFortune(self, htmlString):
    """Return the FortuneHTMLParser verdict for htmlString.

    Bug fix: the original returned `fortuneValidator.isValidFortune()`,
    but no such name exists — the parser local is called `parser`,
    so every fortune verification died with a NameError.
    """
    parser = FortuneHTMLParser()
    parser.feed(htmlString)
    return parser.isValidFortune()
  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString):
    """Return True iff jsonString is a JSON array of exactly two
    objects, each with non-zero integer 'id' and 'randomNumber' keys
    (same contract as the query test's response).

    Invalid JSON, a non-array payload or missing keys yield False
    instead of raising (ValueError/KeyError are not caught by callers).
    """
    try:
      arr = json.loads(jsonString)
    except (ValueError, TypeError):
      return False
    if not isinstance(arr, list) or len(arr) != 2:
      return False
    for entry in arr:
      if not isinstance(entry, dict):
        return False
      for key in ("id", "randomNumber"):
        value = entry.get(key)
        if not value or not isinstance(value, int):
          return False
    return True
  ############################################################
  # Validates the plaintext response body.
  ############################################################
  def validatePlaintext(self, jsonString):
    """Return True iff the response body is exactly "hello, world!",
    ignoring case and surrounding whitespace."""
    expected = "hello, world!"
    return jsonString.strip().lower() == expected
  180. ############################################################
  181. # start(benchmarker)
  182. # Start the test using it's setup file
  183. ############################################################
  184. def start(self, out, err):
  185. return self.setup_module.start(self.benchmarker, out, err)
  186. ############################################################
  187. # End start
  188. ############################################################
  189. ############################################################
  190. # stop(benchmarker)
  191. # Stops the test using it's setup file
  192. ############################################################
  193. def stop(self, out, err):
  194. return self.setup_module.stop(out, err)
  195. ############################################################
  196. # End stop
  197. ############################################################
  198. ############################################################
  199. # verify_urls
  200. # Verifys each of the URLs for this test. THis will sinply
  201. # curl the URL and check for it's return status.
  202. # For each url, a flag will be set on this object for whether
  203. # or not it passed
  204. ############################################################
  205. def verify_urls(self, out, err):
  206. # JSON
  207. if self.runTests[self.JSON]:
  208. try:
  209. out.write( "VERIFYING JSON (" + self.json_url + ") ...\n" )
  210. out.flush()
  211. url = self.benchmarker.generate_url(self.json_url, self.port)
  212. output = self.__curl_url(url, self.JSON, out, err)
  213. if self.validateJson(output):
  214. self.json_url_passed = True
  215. else:
  216. self.json_url_passed = False
  217. except (AttributeError, subprocess.CalledProcessError) as e:
  218. self.json_url_passed = False
  219. # DB
  220. if self.runTests[self.DB]:
  221. try:
  222. out.write( "VERIFYING DB (" + self.db_url + ") ...\n" )
  223. out.flush()
  224. url = self.benchmarker.generate_url(self.db_url, self.port)
  225. output = self.__curl_url(url, self.DB, out, err)
  226. if self.validateDb(output):
  227. self.db_url_passed = True
  228. else:
  229. self.db_url_passed = False
  230. except (AttributeError, subprocess.CalledProcessError) as e:
  231. self.db_url_passed = False
  232. # Query
  233. if self.runTests[self.QUERY]:
  234. try:
  235. out.write( "VERIFYING Query (" + self.query_url + "2) ...\n" )
  236. out.flush()
  237. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  238. output = self.__curl_url(url, self.QUERY, out, err)
  239. if self.validateQuery(output):
  240. self.query_url_passed = True
  241. else:
  242. self.query_url_passed = False
  243. except (AttributeError, subprocess.CalledProcessError) as e:
  244. self.query_url_passed = False
  245. # Fortune
  246. if self.runTests[self.FORTUNE]:
  247. try:
  248. out.write( "VERIFYING Fortune (" + self.fortune_url + ") ...\n" )
  249. out.flush()
  250. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  251. output = self.__curl_url(url, self.FORTUNE, out, err)
  252. if self.validateFortune(output):
  253. self.fortune_url_passed = True
  254. else:
  255. self.fortune_url_passed = False
  256. except (AttributeError, subprocess.CalledProcessError) as e:
  257. self.fortune_url_passed = False
  258. # Update
  259. if self.runTests[self.UPDATE]:
  260. try:
  261. out.write( "VERIFYING Update (" + self.update_url + "2) ...\n" )
  262. out.flush()
  263. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  264. output = self.__curl_url(url, self.UPDATE, out, err)
  265. if self.validateUpdate(output):
  266. self.update_url_passed = True
  267. else:
  268. self.update_url_passed = False
  269. except (AttributeError, subprocess.CalledProcessError) as e:
  270. self.update_url_passed = False
  271. # plaintext
  272. if self.runTests[self.PLAINTEXT]:
  273. try:
  274. out.write( "VERIFYING Plaintext (" + self.plaintext_url + ") ...\n" )
  275. out.flush()
  276. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  277. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  278. if self.validatePlaintext(output):
  279. self.plaintext_url_passed = True
  280. else:
  281. self.plaintext_url_passed = False
  282. except (AttributeError, subprocess.CalledProcessError) as e:
  283. self.plaintext_url_passed = False
  284. ############################################################
  285. # End verify_urls
  286. ############################################################
  287. ############################################################
  288. # contains_type(type)
  289. # true if this test contains an implementation of the given
  290. # test type (json, db, etc.)
  291. ############################################################
  292. def contains_type(self, type):
  293. try:
  294. if type == self.JSON and self.json_url != None:
  295. return True
  296. if type == self.DB and self.db_url != None:
  297. return True
  298. if type == self.QUERY and self.query_url != None:
  299. return True
  300. if type == self.FORTUNE and self.fortune_url != None:
  301. return True
  302. if type == self.UPDATE and self.update_url != None:
  303. return True
  304. if type == self.PLAINTEXT and self.plaintext_url != None:
  305. return True
  306. except AttributeError:
  307. pass
  308. return False
  309. ############################################################
  310. # End stop
  311. ############################################################
  312. ############################################################
  313. # benchmark
  314. # Runs the benchmark for each type of test that it implements
  315. # JSON/DB/Query.
  316. ############################################################
  317. def benchmark(self, out, err):
  318. # JSON
  319. if self.runTests[self.JSON]:
  320. try:
  321. if self.benchmarker.type == "all" or self.benchmarker.type == self.JSON:
  322. out.write("BENCHMARKING JSON ... ")
  323. out.flush()
  324. results = None
  325. if self.json_url_passed:
  326. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  327. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.JSON), err)
  328. results = self.__parse_test(self.JSON)
  329. else:
  330. results = dict()
  331. results['results'] = []
  332. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'], passed=self.json_url_passed)
  333. out.write( "Complete\n" )
  334. out.flush()
  335. except AttributeError:
  336. pass
  337. # DB
  338. if self.runTests[self.DB]:
  339. try:
  340. if self.benchmarker.type == "all" or self.benchmarker.type == self.DB:
  341. out.write("BENCHMARKING DB ... ")
  342. out.flush()
  343. results = None
  344. if self.db_url_passed:
  345. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  346. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.DB), err)
  347. results = self.__parse_test(self.DB)
  348. else:
  349. results = dict()
  350. results['results'] = []
  351. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'], passed=self.db_url_passed)
  352. out.write( "Complete\n" )
  353. except AttributeError:
  354. pass
  355. # Query
  356. if self.runTests[self.QUERY]:
  357. try:
  358. if self.benchmarker.type == "all" or self.benchmarker.type == self.QUERY:
  359. out.write("BENCHMARKING Query ... ")
  360. out.flush()
  361. results = None
  362. if self.query_url_passed:
  363. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  364. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.QUERY), err)
  365. results = self.__parse_test(self.QUERY)
  366. else:
  367. results = dict()
  368. results['results'] = []
  369. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'], passed=self.query_url_passed)
  370. out.write( "Complete\n" )
  371. out.flush()
  372. except AttributeError:
  373. pass
  374. # fortune
  375. if self.runTests[self.FORTUNE]:
  376. try:
  377. if self.benchmarker.type == "all" or self.benchmarker.type == self.FORTUNE:
  378. out.write("BENCHMARKING Fortune ... ")
  379. out.flush()
  380. results = None
  381. if self.fortune_url_passed:
  382. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  383. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.FORTUNE), err)
  384. results = self.__parse_test(self.FORTUNE)
  385. else:
  386. results = dict()
  387. results['results'] = []
  388. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'], passed=self.fortune_url_passed)
  389. out.write( "Complete\n" )
  390. out.flush()
  391. except AttributeError:
  392. pass
  393. # update
  394. if self.runTests[self.UPDATE]:
  395. try:
  396. if self.benchmarker.type == "all" or self.benchmarker.type == self.UPDATE:
  397. out.write("BENCHMARKING Update ... ")
  398. out.flush()
  399. results = None
  400. if self.update_url_passed:
  401. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  402. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.UPDATE), err)
  403. results = self.__parse_test(self.UPDATE)
  404. else:
  405. results = dict()
  406. results['results'] = []
  407. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'], passed=self.update_url_passed)
  408. out.write( "Complete\n" )
  409. out.flush()
  410. except AttributeError:
  411. pass
  412. # plaintext
  413. if self.runTests[self.PLAINTEXT]:
  414. try:
  415. if self.benchmarker.type == "all" or self.benchmarker.type == self.PLAINTEXT:
  416. out.write("BENCHMARKING Plaintext ... ")
  417. out.flush()
  418. results = None
  419. if self.plaintext_url_passed:
  420. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256,1024,4096,16384], pipeline="--pipeline 16")
  421. self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, self.PLAINTEXT), err)
  422. results = self.__parse_test(self.PLAINTEXT)
  423. else:
  424. results = dict()
  425. results['results'] = []
  426. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'], passed=self.plaintext_url_passed)
  427. out.write( "Complete\n" )
  428. out.flush()
  429. except AttributeError:
  430. traceback.print_exc()
  431. pass
  432. ############################################################
  433. # End benchmark
  434. ############################################################
  435. ############################################################
  436. # parse_all
  437. # Method meant to be run for a given timestamp
  438. ############################################################
  439. def parse_all(self):
  440. # JSON
  441. if os.path.exists(self.benchmarker.output_file(self.name, self.JSON)):
  442. results = self.__parse_test(self.JSON)
  443. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  444. # DB
  445. if os.path.exists(self.benchmarker.output_file(self.name, self.DB)):
  446. results = self.__parse_test(self.DB)
  447. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  448. # Query
  449. if os.path.exists(self.benchmarker.output_file(self.name, self.QUERY)):
  450. results = self.__parse_test(self.QUERY)
  451. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  452. # Fortune
  453. if os.path.exists(self.benchmarker.output_file(self.name, self.FORTUNE)):
  454. results = self.__parse_test(self.FORTUNE)
  455. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  456. # Update
  457. if os.path.exists(self.benchmarker.output_file(self.name, self.UPDATE)):
  458. results = self.__parse_test(self.UPDATE)
  459. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  460. # Plaintext
  461. if os.path.exists(self.benchmarker.output_file(self.name, self.PLAINTEXT)):
  462. results = self.__parse_test(self.PLAINTEXT)
  463. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  464. ############################################################
  465. # End parse_all
  466. ############################################################
  467. ############################################################
  468. # __parse_test(test_type)
  469. ############################################################
  470. def __parse_test(self, test_type):
  471. try:
  472. results = dict()
  473. results['results'] = []
  474. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  475. is_warmup = True
  476. rawData = None
  477. for line in raw_data:
  478. if "Queries:" in line or "Concurrency:" in line:
  479. is_warmup = False
  480. rawData = None
  481. continue
  482. if "Warmup" in line or "Primer" in line:
  483. is_warmup = True
  484. continue
  485. if not is_warmup:
  486. if rawData == None:
  487. rawData = dict()
  488. results['results'].append(rawData)
  489. #if "Requests/sec:" in line:
  490. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  491. # rawData['reportedResults'] = m.group(1)
  492. # search for weighttp data such as succeeded and failed.
  493. if "Latency" in line:
  494. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  495. if len(m) == 4:
  496. rawData['latencyAvg'] = m[0]
  497. rawData['latencyStdev'] = m[1]
  498. rawData['latencyMax'] = m[2]
  499. # rawData['latencyStdevPercent'] = m[3]
  500. #if "Req/Sec" in line:
  501. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  502. # if len(m) == 4:
  503. # rawData['requestsAvg'] = m[0]
  504. # rawData['requestsStdev'] = m[1]
  505. # rawData['requestsMax'] = m[2]
  506. # rawData['requestsStdevPercent'] = m[3]
  507. #if "requests in" in line:
  508. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  509. # if m != None:
  510. # # parse out the raw time, which may be in minutes or seconds
  511. # raw_time = m.group(1)
  512. # if "ms" in raw_time:
  513. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  514. # elif "s" in raw_time:
  515. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  516. # elif "m" in raw_time:
  517. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  518. # elif "h" in raw_time:
  519. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  520. if "requests in" in line:
  521. m = re.search("([0-9]+) requests in", line)
  522. if m != None:
  523. rawData['totalRequests'] = int(m.group(1))
  524. if "Socket errors" in line:
  525. if "connect" in line:
  526. m = re.search("connect ([0-9]+)", line)
  527. rawData['connect'] = int(m.group(1))
  528. if "read" in line:
  529. m = re.search("read ([0-9]+)", line)
  530. rawData['read'] = int(m.group(1))
  531. if "write" in line:
  532. m = re.search("write ([0-9]+)", line)
  533. rawData['write'] = int(m.group(1))
  534. if "timeout" in line:
  535. m = re.search("timeout ([0-9]+)", line)
  536. rawData['timeout'] = int(m.group(1))
  537. if "Non-2xx" in line:
  538. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  539. if m != None:
  540. rawData['5xx'] = int(m.group(1))
  541. return results
  542. except IOError:
  543. return None
  544. ############################################################
  545. # End benchmark
  546. ############################################################
  547. ##########################################################################################
  548. # Private Methods
  549. ##########################################################################################
  550. ############################################################
  551. # __run_benchmark(script, output_file)
  552. # Runs a single benchmark using the script which is a bash
  553. # template that uses weighttp to run the test. All the results
  554. # outputed to the output_file.
  555. ############################################################
  556. def __run_benchmark(self, script, output_file, err):
  557. with open(output_file, 'w') as raw_file:
  558. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  559. p.communicate(script)
  560. err.flush()
  561. ############################################################
  562. # End __run_benchmark
  563. ############################################################
  564. ############################################################
  565. # __generate_concurrency_script(url, port)
  566. # Generates the string containing the bash script that will
  567. # be run on the client to benchmark a single test. This
  568. # specifically works for the variable concurrency tests (JSON
  569. # and DB)
  570. ############################################################
  571. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  572. if len(intervals) == 0:
  573. intervals = self.benchmarker.concurrency_levels
  574. headers = self.__get_request_headers(accept_header)
  575. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  576. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  577. interval=" ".join("{}".format(item) for item in intervals),
  578. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  579. pipeline=pipeline)
  580. ############################################################
  581. # End __generate_concurrency_script
  582. ############################################################
  583. ############################################################
  584. # __generate_query_script(url, port)
  585. # Generates the string containing the bash script that will
  586. # be run on the client to benchmark a single test. This
  587. # specifically works for the variable query tests (Query)
  588. ############################################################
  589. def __generate_query_script(self, url, port, accept_header):
  590. headers = self.__get_request_headers(accept_header)
  591. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  592. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  593. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  594. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  595. ############################################################
  596. # End __generate_query_script
  597. ############################################################
  598. ############################################################
  599. # __get_request_headers(accept_header)
  600. # Generates the complete HTTP header string
  601. ############################################################
  602. def __get_request_headers(self, accept_header):
  603. return self.headers_template.format(accept=accept_header)
  604. ############################################################
  605. # End __format_request_headers
  606. ############################################################
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    """Curl `url` three times: dump headers+body to `out`, capture the
    body to return, and finally probe with -f so an HTTP error status
    raises subprocess.CalledProcessError."""
    # -i includes response headers; -sS hides the progress bar but still
    # shows errors. No -f here so the body is dumped even on HTTP errors.
    subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add one.
    out.write("\n")
    out.flush()
    # Second, quiet invocation to capture the bare response body.
    body_proc = subprocess.Popen(["curl", "-s", url], stdout=subprocess.PIPE)
    captured = body_proc.communicate()
    # The first invocation could not use -f (the body would be
    # suppressed), so probe separately with -f: an HTTP error status now
    # raises subprocess.CalledProcessError. check_output() is used so the
    # duplicate response is discarded rather than written to out again.
    subprocess.check_output(["curl", "-fsS", url], stderr=err)
    out.flush()
    err.flush()
    # HTTP output may not end in a newline, so add one.
    out.write("\n")
    out.flush()
    if captured:
      # communicate() returned (stdout, stderr); hand back the body.
      return captured[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  646. ##########################################################################################
  647. # Constructor
  648. ##########################################################################################
  649. def __init__(self, name, directory, benchmarker, runTests, args):
  650. self.name = name
  651. self.directory = directory
  652. self.benchmarker = benchmarker
  653. self.runTests = runTests
  654. self.__dict__.update(args)
  655. # ensure directory has __init__.py file so that we can use it as a Python package
  656. if not os.path.exists(os.path.join(directory, "__init__.py")):
  657. open(os.path.join(directory, "__init__.py"), 'w').close()
  658. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  659. ############################################################
  660. # End __init__
  661. ############################################################
  662. ############################################################
  663. # End FrameworkTest
  664. ############################################################
  665. ##########################################################################################
  666. # Static methods
  667. ##########################################################################################
  668. ##############################################################
  669. # parse_config(config, directory, benchmarker)
  670. # parses a config file and returns a list of FrameworkTest
  671. # objects based on that config file.
  672. ##############################################################
  673. def parse_config(config, directory, benchmarker):
  674. tests = []
  675. # The config object can specify multiple tests, we neep to loop
  676. # over them and parse them out
  677. for test in config['tests']:
  678. for key, value in test.iteritems():
  679. test_name = config['framework']
  680. runTests = dict()
  681. runTests["json"] = True if value.get("json_url", False) else False
  682. runTests["db"] = True if value.get("db_url", False) else False
  683. runTests["query"] = True if value.get("query_url", False) else False
  684. runTests["fortune"] = True if value.get("fortune_url", False) else False
  685. runTests["update"] = True if value.get("update_url", False) else False
  686. runTests["plaintext"] = True if value.get("plaintext_url", False) else False
  687. # if the test uses the 'defualt' keywork, then we don't
  688. # append anything to it's name. All configs should only have 1 default
  689. if key != 'default':
  690. # we need to use the key in the test_name
  691. test_name = test_name + "-" + key
  692. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  693. return tests
  694. ##############################################################
  695. # End parse_config
  696. ##############################################################