framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util

import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json
import textwrap
import logging

class FrameworkTest:
  ##########################################################################################
  # Class variables
  ##########################################################################################
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
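
  # Illustrative example (not part of the original source): formatting headers_template
  # with accept_json yields a curl/wrk header string of roughly
  #   -H 'Host: localhost' -H 'Accept: application/json,text/html;q=0.9,...' -H 'Connection: keep-alive'
  # headers_full_template additionally carries Accept-Language, User-Agent and Cookie headers.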
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
      sleep 2
    done
  """
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
  """
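
  # Illustrative expansion (hypothetical benchmarker values, not from the original source):
  # with duration=60, max_threads=8, a server_host of 10.0.0.1 and a concurrency interval
  # of "8 16 32 64 128 256", the loop in concurrency_template runs commands of the form
  #   wrk -H 'Host: localhost' -H 'Accept: ...' -H 'Connection: keep-alive' \
  #       -d 60 -c 64 --timeout 64 -t 8 http://10.0.0.1:8080/plaintext -s ~/pipeline.lua -- 16
  # while query_template keeps the client concurrency fixed and appends the query count ($c)
  # to the URL instead.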
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates the jsonString is a JSON object with a 'message'
  # key with the value "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString, out, err):
    try:
      obj = {k.lower(): v for k, v in json.loads(jsonString).items()}
      if obj["message"].lower() == "hello, world!":
        return True
    except:
      pass
    return False
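
  # Illustrative usage (hypothetical FrameworkTest instance `test`):
  #   test.validateJson('{"message": "Hello, World!"}', out, err)   # -> True
  #   test.validateJson('{"MESSAGE": "hello, world!"}', out, err)   # -> True (key and value are case-insensitive)
  #   test.validateJson('{"message": "Goodbye"}', out, err)         # -> False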
  ############################################################
  # Validates the jsonString is a JSON object that has an "id"
  # and a "randomNumber" key, and that both keys map to
  # integers.
  ############################################################
  def validateDb(self, jsonString, out, err):
    try:
      obj = json.loads(jsonString)
      # We are allowing the single-object array for the DB
      # test for now, but will likely remove this later.
      if type(obj) == list:
        obj = obj[0]
      obj = {k.lower(): v for k, v in obj.items()}
      # This will raise if the value cannot be parsed to a
      # float (it works with ints, but turns them into their
      # float equivalent; i.e. "123" => 123.0)
      if (type(float(obj["id"])) == float and
          type(float(obj["randomnumber"])) == float):
        return True
    except:
      pass
    return False

  ############################################################
  # Same as validateDb, but does not accept the single-object
  # array form.
  ############################################################
  def validateDbStrict(self, jsonString, out, err):
    try:
      obj = {k.lower(): v for k, v in json.loads(jsonString).items()}
      # This will raise if the value cannot be parsed to a
      # float (it works with ints, but turns them into their
      # float equivalent; i.e. "123" => 123.0)
      if (type(float(obj["id"])) == float and
          type(float(obj["randomnumber"])) == float):
        return True
    except:
      pass
    return False
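
  # Illustrative payloads (hypothetical values; keys are matched case-insensitively and
  # values may be numbers or numeric strings):
  #   '{"id": 4174, "randomNumber": 331}'    -> passes validateDb and validateDbStrict
  #   '[{"id": 4174, "randomNumber": 331}]'  -> passes validateDb only (single-object array)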
  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQuery(self, jsonString, out, err):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomnumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomnumber"])) == float):
        return True
    except:
      pass
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 1, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQueryOneOrLess(self, jsonString, out, err):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if len(arr) != 1:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomnumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      pass
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 500, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateQueryFiveHundredOrMore(self, jsonString, out, err):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if len(arr) != 500:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomnumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      pass
    return False

  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune response.
  ############################################################
  def validateFortune(self, htmlString, out, err):
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      return parser.isValidFortune()
    except:
      pass
    return False

  ############################################################
  # Validates the jsonString is an array with a length of
  # 2, that each entry in the array is a JSON object, that
  # each object has an "id" and a "randomNumber" key, and that
  # both keys map to integers.
  ############################################################
  def validateUpdate(self, jsonString, out, err):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomnumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomnumber"])) == float):
        return True
    except:
      pass
    return False
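
  # Illustrative payload for validateQuery/validateUpdate (hypothetical values):
  #   '[{"id": 4174, "randomNumber": 331}, {"id": 51, "randomNumber": 6227}]'  -> True
  # validateQueryOneOrLess / validateQueryFiveHundredOrMore apply the same per-object
  # checks but require the array length to be exactly 1 or 500, respectively.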
  ############################################################
  # Validates that the response is the "Hello, World!"
  # plaintext string (case-insensitive, ignoring surrounding
  # whitespace).
  ############################################################
  def validatePlaintext(self, jsonString, out, err):
    try:
      return jsonString.lower().strip() == "hello, world!"
    except:
      pass
    return False

  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Framework %s does not have a bash_profile.sh" % self.name)
      profile = "$FWROOT/config/benchmark_profile"
    set_iroot = "export IROOT=%s" % self.install_root
    setup_util.replace_environ(config=profile, command=set_iroot)
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################
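
  # A minimal setup module sketch (illustrative, not from this repository). The module
  # named by the test's `setup_file` config entry is imported as a package module in
  # __init__ below, and must expose start/stop with the signatures used above:
  #
  #   # frameworks/foo/setup.py (hypothetical)
  #   import subprocess
  #
  #   def start(benchmarker, out, err):
  #     subprocess.Popen("./run-server.sh", shell=True, stdout=out, stderr=err)
  #     return 0
  #
  #   def stop(out, err):
  #     subprocess.call("pkill -f run-server", shell=True, stdout=out, stderr=err)
  #     return 0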
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each URL, a
  # flag will be set on this object for whether or not it
  # passed.
  ############################################################
  def verify_urls(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING JSON ({url})
        -----------------------------------------------------
        """.format(url=self.json_url)))
      out.flush()
      url = self.benchmarker.generate_url(self.json_url, self.port)
      output = self.__curl_url(url, self.JSON, out, err)
      out.write("VALIDATING JSON ... ")
      if self.validateJson(output, out, err):
        self.json_url_passed = True
        out.write("PASS\n\n")
      else:
        self.json_url_passed = False
        out.write("FAIL\n\n")
      out.flush()

    # DB
    if self.runTests[self.DB]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING DB ({url})
        -----------------------------------------------------
        """.format(url=self.db_url)))
      out.flush()
      url = self.benchmarker.generate_url(self.db_url, self.port)
      output = self.__curl_url(url, self.DB, out, err)
      if self.validateDb(output, out, err):
        self.db_url_passed = True
      else:
        self.db_url_passed = False
      if self.validateDbStrict(output, out, err):
        self.db_url_warn = False
      else:
        self.db_url_warn = True
      out.write("VALIDATING DB ... ")
      if self.db_url_passed:
        out.write("PASS")
        if self.db_url_warn:
          out.write(" (with warnings)")
        out.write("\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()
    # Query
    if self.runTests[self.QUERY]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING QUERY ({url})
        -----------------------------------------------------
        """.format(url=self.query_url + "2")))
      out.flush()
      url = self.benchmarker.generate_url(self.query_url + "2", self.port)
      output = self.__curl_url(url, self.QUERY, out, err)
      if self.validateQuery(output, out, err):
        self.query_url_passed = True
        out.write(self.query_url + "2 - PASS\n\n")
      else:
        self.query_url_passed = False
        out.write(self.query_url + "2 - FAIL\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      self.query_url_warn = False

      url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
      output2 = self.__curl_url(url2, self.QUERY, out, err)
      if not self.validateQueryOneOrLess(output2, out, err):
        self.query_url_warn = True
        out.write(self.query_url + "0 - WARNING\n\n")
      else:
        out.write(self.query_url + "0 - PASS\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
      output3 = self.__curl_url(url3, self.QUERY, out, err)
      if not self.validateQueryOneOrLess(output3, out, err):
        self.query_url_warn = True
        out.write(self.query_url + "foo - WARNING\n\n")
      else:
        out.write(self.query_url + "foo - PASS\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
      output4 = self.__curl_url(url4, self.QUERY, out, err)
      if not self.validateQueryFiveHundredOrMore(output4, out, err):
        self.query_url_warn = True
        out.write(self.query_url + "501 - WARNING\n\n")
      else:
        out.write(self.query_url + "501 - PASS\n\n")
      out.write("-----------------------------------------------------\n\n\n")
      out.flush()

      out.write("VALIDATING QUERY ... ")
      if self.query_url_passed:
        out.write("PASS")
        if self.query_url_warn:
          out.write(" (with warnings)")
        out.write("\n\n")
      else:
        out.write("FAIL\n\n")
      out.flush()
    # Fortune
    if self.runTests[self.FORTUNE]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING FORTUNE ({url})
        -----------------------------------------------------
        """.format(url=self.fortune_url)))
      out.flush()
      url = self.benchmarker.generate_url(self.fortune_url, self.port)
      output = self.__curl_url(url, self.FORTUNE, out, err)
      out.write("VALIDATING FORTUNE ... ")
      if self.validateFortune(output, out, err):
        self.fortune_url_passed = True
        out.write("PASS\n\n")
      else:
        self.fortune_url_passed = False
        out.write("FAIL\n\n")
      out.flush()

    # Update
    if self.runTests[self.UPDATE]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING UPDATE ({url})
        -----------------------------------------------------
        """.format(url=self.update_url)))
      out.flush()
      url = self.benchmarker.generate_url(self.update_url + "2", self.port)
      output = self.__curl_url(url, self.UPDATE, out, err)
      out.write("VALIDATING UPDATE ... ")
      if self.validateUpdate(output, out, err):
        self.update_url_passed = True
        out.write("PASS\n\n")
      else:
        self.update_url_passed = False
        out.write("FAIL\n\n")
      out.flush()

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      out.write(textwrap.dedent("""
        -----------------------------------------------------
        VERIFYING PLAINTEXT ({url})
        -----------------------------------------------------
        """.format(url=self.plaintext_url)))
      out.flush()
      url = self.benchmarker.generate_url(self.plaintext_url, self.port)
      output = self.__curl_url(url, self.PLAINTEXT, out, err)
      out.write("VALIDATING PLAINTEXT ... ")
      if self.validatePlaintext(output, out, err):
        self.plaintext_url_passed = True
        out.write("PASS\n\n")
      else:
        self.plaintext_url_passed = False
        out.write("FAIL\n\n")
      out.flush()
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # contains_type(type)
  # True if this test contains an implementation of the given
  # test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url is not None:
        return True
      if type == self.DB and self.db_url is not None:
        return True
      if type == self.QUERY and self.query_url is not None:
        return True
      if type == self.FORTUNE and self.fortune_url is not None:
        return True
      if type == self.UPDATE and self.update_url is not None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url is not None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):
    # JSON
    if self.runTests[self.JSON]:
      try:
        out.write("BENCHMARKING JSON ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.JSON)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.JSON)
        self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        out.write("BENCHMARKING DB ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.DB)
        warning_file = self.benchmarker.warning_file(self.name, self.DB)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.db_url_warn:
          with open(warning_file, 'w'):
            pass
        if self.db_url_passed:
          remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.DB)
        self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
        out.write("Complete\n")
      except AttributeError:
        pass
    # Query
    if self.runTests[self.QUERY]:
      try:
        out.write("BENCHMARKING Query ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.QUERY)
        warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.query_url_warn:
          with open(warning_file, 'w'):
            pass
        if self.query_url_passed:
          remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        out.write("BENCHMARKING Fortune ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.fortune_url_passed:
          remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass
    # Update
    if self.runTests[self.UPDATE]:
      try:
        out.write("BENCHMARKING Update ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.UPDATE)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.update_url_passed:
          remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        out.write("BENCHMARKING Plaintext ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
        if not os.path.exists(output_file):
          with open(output_file, 'w'):
            # Simply opening the file in write mode should create the empty file.
            pass
        if self.plaintext_url_passed:
          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext,
                                                             wrk_command="wrk", intervals=[256, 1024, 4096, 16384],
                                                             pipeline="16")
          self.__run_benchmark(remote_script, output_file, err)
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        traceback.print_exc()
        pass
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue
            if not is_warmup:
              if rawData is None:
                rawData = dict()
                results['results'].append(rawData)

              # if "Requests/sec:" in line:
              #   m = re.search("Requests/sec:\s+([0-9]+)", line)
              #   rawData['reportedResults'] = m.group(1)

              # search for weighttp data such as succeeded and failed.
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]

              # if "Req/Sec" in line:
              #   m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #   if len(m) == 4:
              #     rawData['requestsAvg'] = m[0]
              #     rawData['requestsStdev'] = m[1]
              #     rawData['requestsMax'] = m[2]
              #     rawData['requestsStdevPercent'] = m[3]

              # if "requests in" in line:
              #   m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #   if m != None:
              #     # parse out the raw time, which may be in minutes or seconds
              #     raw_time = m.group(1)
              #     if "ms" in raw_time:
              #       rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #     elif "s" in raw_time:
              #       rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #     elif "m" in raw_time:
              #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #     elif "h" in raw_time:
              #       rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m is not None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m is not None:
                  rawData['5xx'] = int(m.group(1))

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
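
  # Illustrative example (hypothetical wrk output, not from the original source): for an
  # output chunk following a "Concurrency: 64 ..." marker such as
  #     Latency     3.25ms    5.01ms  100.00ms   90.00%
  #     1234567 requests in 15.00s, 100.00MB read
  #     Socket errors: connect 0, read 10, write 0, timeout 5
  # __parse_test would append roughly
  #   {'latencyAvg': '3.25ms', 'latencyStdev': '5.01ms', 'latencyMax': '100.00ms',
  #    'totalRequests': 1234567, 'connect': 0, 'read': 10, 'write': 0, 'timeout': 5}
  # to results['results'].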
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the script, which is a bash
  # script generated from one of the wrk templates above. All
  # of the results are written to the output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is a problem running curl (HTTP error codes are ignored).
  ############################################################
  def __curl_url(self, url, testType, out, err):
    output = None
    try:
      # Use -m 15 to make curl stop trying after 15sec.
      # Use -i to output response with headers.
      # Don't use -f so that the HTTP response code is ignored.
      # Use --stderr - to redirect stderr to stdout so we get
      # error output for sure in stdout.
      # Use -sS to hide progress bar, but show errors.
      subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
      # HTTP output may not end in a newline, so add that here.
      out.write("\n\n")
      out.flush()
      err.flush()
      # We need to get the response body from curl and return it.
      p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
      output = p.communicate()
    except:
      pass
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  def requires_database(self):
    """Returns True/False if this test requires a database"""
    return (self.contains_type(self.FORTUNE) or
            self.contains_type(self.DB) or
            self.contains_type(self.QUERY) or
            self.contains_type(self.UPDATE))
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
      self.install_root = "%s/pertest/%s" % (self.install_root, name)

    self.__dict__.update(args)

    # ensure directory has __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()
    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################


##########################################################################################
# Static methods
##########################################################################################
##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests, so we need to
  # loop over them and parse each one out.
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']

      runTests = dict()
      runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
      runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
      runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
      runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
      runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
      runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)

      # If the test uses the 'default' keyword, we don't append
      # anything to its name. All configs should have only one default.
      if key != 'default':
        # we need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))

  return tests
##############################################################
# End parse_config
##############################################################
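
# Illustrative benchmark_config shape that parse_config expects (hypothetical values).
# The per-test attribute dict is applied to the FrameworkTest via __dict__.update in the
# constructor, so keys such as setup_file, port and the *_url entries become the instance
# attributes used above.
#
#   {
#     "framework": "foo",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "port": 8080,
#         "json_url": "/json",
#         "db_url": "/db",
#         "query_url": "/queries?queries=",
#         "fortune_url": "/fortunes",
#         "update_url": "/updates?queries=",
#         "plaintext_url": "/plaintext"
#       }
#     }]
#   }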