framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util

import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
import json
import logging

log = logging.getLogger('framework_test')

from utils import WrapLogger
from utils import Header

class FrameworkTest:
  17. """
  18. Represents a framework test, including all types (JSON, plaintext, DB, etc)
  19. defined in that test. Used by Benchmarker to start, verify, benchmark, and
  20. stop tests. Calls into the test's setup.py as needed.
  21. Note: Any method in this class called from Benchmarker#__run_test is run
  22. inside a thread
  23. Note: Many methods have a parameter 'logger' passed in from Benchmarker.
  24. This uses python's logging module to support writing output to both a
  25. file and stdout concurrently. If you wish to print something to stdout,
  26. regardless of the current global log level, use logger.info("Something").
  27. If you wish to respect the current global log level, use the logger
  28. defined for this class e.g. log.info("Something else"). If you would
  29. like to use this 'logger' with subprocess, see class WrapLogger
  30. """
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

  accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
  accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
  accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
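
  # Example: __get_request_headers(accept_json) (defined below) renders
  # headers_template to
  #   -H 'Host: localhost' -H 'Accept: application/json,...;q=0.7' -H 'Connection: keep-alive'
  # and the result is substituted for {headers} in the wrk command lines
  # of the templates below.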
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" "http://{server_host}:{port}{url}" -s ~/pipeline.lua -- {pipeline}
      sleep 2
    done
    """
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5
    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      sleep 2
    done
    """
  language = None
  platform = None
  webserver = None
  classification = None
  database = None
  approach = None
  orm = None
  framework = None
  os = None
  database_os = None
  display_name = None
  notes = None
  versus = None

  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"
  ##########################################################################################
  # Public Methods
  ##########################################################################################

  ############################################################
  # Validates that jsonString is a JSON object with a 'message'
  # key whose value is "hello, world!" (case-insensitive).
  ############################################################
  def validateJson(self, jsonString, logger=log):
    try:
      obj = {k.lower(): v for k, v in json.loads(jsonString).items()}
      if obj["message"].lower() == "hello, world!":
        return True
    except:
      logger.debug("Expected: %s", "{'message':'hello, world!'}")
      logger.debug("Got: '%s'", jsonString)
    return False
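
  # e.g. validateJson('{"Message": "Hello, World!"}')  -> True (case-insensitive)
  #      validateJson('{"message": "something else"}') -> False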
  ############################################################
  # Validates that jsonString is a JSON object with an "id"
  # and a "randomNumber" key, and that both values parse as
  # numbers.
  ############################################################
  def validateDb(self, jsonString, logger=log):
    try:
      obj = json.loads(jsonString)
      # We are allowing the single-object array for the DB
      # test for now, but will likely remove this later.
      if type(obj) == list:
        obj = obj[0]
      obj = {k.lower(): v for k, v in obj.items()}
      # float() raises if the value cannot be parsed as a number
      # (this works with ints, but turns them into their float
      # equivalent; i.e. "123" => 123.0).
      if (type(float(obj["id"])) == float and
          type(float(obj["randomnumber"])) == float):
        return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
  def validateDbStrict(self, jsonString, logger=log):
    try:
      obj = {k.lower(): v for k, v in json.loads(jsonString).items()}
      # float() raises if the value cannot be parsed as a number
      # (this works with ints, but turns them into their float
      # equivalent; i.e. "123" => 123.0).
      if (type(float(obj["id"])) == float and
          type(float(obj["randomnumber"])) == float):
        return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
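
  # e.g. validateDb('{"id": 4174, "randomNumber": 331}')         -> True
  #      validateDb('[{"id": 4174, "randomNumber": 331}]')       -> True (array form tolerated)
  #      validateDbStrict('[{"id": 4174, "randomNumber": 331}]') -> False (strict rejects the array form)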
  ############################################################
  # Validates that jsonString is an array with at least two
  # entries, that each entry is a JSON object, and that each
  # object has "id" and "randomNumber" keys whose values
  # parse as numbers.
  ############################################################
  def validateQuery(self, jsonString, logger=log):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomnumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomnumber"])) == float):
        return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
  ############################################################
  # Validates that jsonString is an array of length 1, that
  # the entry is a JSON object, and that it has "id" and
  # "randomNumber" keys whose values parse as numbers.
  ############################################################
  def validateQueryOneOrLess(self, jsonString, logger=log):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if len(arr) != 1:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomnumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
  ############################################################
  # Validates that jsonString is an array of length 500, that
  # each entry in the array is a JSON object, and that each
  # object has "id" and "randomNumber" keys whose values
  # parse as numbers.
  ############################################################
  def validateQueryFiveHundredOrMore(self, jsonString, logger=log):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if len(arr) != 500:
        return False
      for obj in arr:
        if (type(float(obj["id"])) != float or
            type(float(obj["randomnumber"])) != float):
          return False
      # By here, it's passed validation
      return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
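
  # The two helpers above back the bounds checks in verify_urls below: a
  # query count of "0" or "foo" should be clamped to a single result, and
  # "501" should be clamped to 500 results.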
  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune response.
  ############################################################
  def validateFortune(self, htmlString, logger=log):
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      return parser.isValidFortune()
    except:
      logger.debug("Got: %s", htmlString)
    return False
  ############################################################
  # Validates that jsonString is an array with at least two
  # entries, that each entry is a JSON object, and that each
  # object has "id" and "randomNumber" keys whose values
  # parse as numbers.
  ############################################################
  def validateUpdate(self, jsonString, logger=log):
    try:
      arr = [{k.lower(): v for k, v in d.items()} for d in json.loads(jsonString)]
      if (type(float(arr[0]["id"])) == float and
          type(float(arr[0]["randomnumber"])) == float and
          type(float(arr[1]["id"])) == float and
          type(float(arr[1]["randomnumber"])) == float):
        return True
    except:
      logger.debug("Got: %s", jsonString)
    return False
  ############################################################
  # Validates that the response body is "hello, world!"
  # (case-insensitive, ignoring surrounding whitespace).
  ############################################################
  def validatePlaintext(self, jsonString, logger=log):
    try:
      return jsonString.lower().strip() == "hello, world!"
    except:
      logger.debug("Expected: %s", "hello, world!")
      logger.debug("Got: %s", jsonString)
    return False
  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, logger=log):
    log.info("start")
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logger.warning("Framework %s does not have a bash_profile", self.name)
      profile = "$FWROOT/config/benchmark_profile"
    set_iroot = "export IROOT=%s" % self.install_root
    setup_util.replace_environ(config=profile, command=set_iroot)
    (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
    return self.setup_module.start(self.benchmarker, out, err)
  ############################################################
  # End start
  ############################################################
  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, logger=log):
    log.info("stop")
    (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
    return self.setup_module.stop(out, err)
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each url,
  # a flag is set on this object indicating whether or not
  # it passed.
  ############################################################
  def verify_urls(self, logger=log):
    # JSON
    if self.runTests[self.JSON]:
      logger.info(Header("VERIFYING JSON (%s)" % self.json_url))
      url = self.benchmarker.generate_url(self.json_url, self.port)
      output = self.__curl_url(url, self.JSON, logger)
      logger.info("VALIDATING JSON ... ")
      if self.validateJson(output, logger):
        self.json_url_passed = True
        logger.info("PASS")
      else:
        self.json_url_passed = False
        logger.info("FAIL")
    # DB
    if self.runTests[self.DB]:
      logger.info(Header("VERIFYING DB (%s)" % self.db_url))
      url = self.benchmarker.generate_url(self.db_url, self.port)
      output = self.__curl_url(url, self.DB, logger)
      if self.validateDb(output, logger):
        self.db_url_passed = True
      else:
        self.db_url_passed = False
      if self.validateDbStrict(output, logger):
        self.db_url_warn = False
      else:
        self.db_url_warn = True
      logger.info("VALIDATING DB ... ")
      if self.db_url_passed:
        if self.db_url_warn:
          logger.info("PASS (with warnings)")
        else:
          logger.info("PASS")
      else:
        logger.info("FAIL")
    # Query
    if self.runTests[self.QUERY]:
      logger.info(Header("VERIFYING QUERY (%s)" % (self.query_url + "2")))
      url = self.benchmarker.generate_url(self.query_url + "2", self.port)
      output = self.__curl_url(url, self.QUERY, logger)
      if self.validateQuery(output, logger):
        self.query_url_passed = True
        logger.info(self.query_url + "2 - PASS")
      else:
        self.query_url_passed = False
        logger.info(self.query_url + "2 - FAIL")
      logger.info("-----------------------------------------------------")
      self.query_url_warn = False
      url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
      output2 = self.__curl_url(url2, self.QUERY, logger)
      if not self.validateQueryOneOrLess(output2, logger):
        self.query_url_warn = True
        logger.info(self.query_url + "0 - WARNING")
      else:
        logger.info(self.query_url + "0 - PASS")
      logger.info("-----------------------------------------------------")
      url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
      output3 = self.__curl_url(url3, self.QUERY, logger)
      if not self.validateQueryOneOrLess(output3, logger):
        self.query_url_warn = True
        logger.info(self.query_url + "foo - WARNING")
      else:
        logger.info(self.query_url + "foo - PASS")
      logger.info("-----------------------------------------------------")
      url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
      output4 = self.__curl_url(url4, self.QUERY, logger)
      if not self.validateQueryFiveHundredOrMore(output4, logger):
        self.query_url_warn = True
        logger.info(self.query_url + "501 - WARNING")
      else:
        logger.info(self.query_url + "501 - PASS")
      logger.info("-----------------------------------------------------")
      logger.info("VALIDATING QUERY ... ")
      if self.query_url_passed and self.query_url_warn:
        logger.info("PASS (with warnings)")
      elif self.query_url_passed:
        logger.info("PASS")
      else:
        logger.info("FAIL")
    # Fortune
    if self.runTests[self.FORTUNE]:
      logger.info(Header("VERIFYING FORTUNE (%s)" % self.fortune_url))
      url = self.benchmarker.generate_url(self.fortune_url, self.port)
      output = self.__curl_url(url, self.FORTUNE, logger)
      logger.info("VALIDATING FORTUNE ... ")
      if self.validateFortune(output, logger):
        self.fortune_url_passed = True
        logger.info("PASS")
      else:
        self.fortune_url_passed = False
        logger.info("FAIL")

    # Update
    if self.runTests[self.UPDATE]:
      logger.info(Header("VERIFYING UPDATE (%s)" % self.update_url))
      url = self.benchmarker.generate_url(self.update_url + "2", self.port)
      output = self.__curl_url(url, self.UPDATE, logger)
      logger.info("VALIDATING UPDATE ... ")
      if self.validateUpdate(output, logger):
        self.update_url_passed = True
        logger.info("PASS")
      else:
        self.update_url_passed = False
        logger.info("FAIL")

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      logger.info(Header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
      url = self.benchmarker.generate_url(self.plaintext_url, self.port)
      output = self.__curl_url(url, self.PLAINTEXT, logger)
      logger.info("VALIDATING PLAINTEXT ... ")
      if self.validatePlaintext(output, logger):
        self.plaintext_url_passed = True
        logger.info("PASS")
      else:
        self.plaintext_url_passed = False
        logger.info("FAIL")
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # contains_type(type)
  # Returns True if this test contains an implementation of
  # the given test type (json, db, etc.)
  ############################################################
  def contains_type(self, type):
    try:
      if type == self.JSON and self.json_url is not None:
        return True
      if type == self.DB and self.db_url is not None:
        return True
      if type == self.QUERY and self.query_url is not None:
        return True
      if type == self.FORTUNE and self.fortune_url is not None:
        return True
      if type == self.UPDATE and self.update_url is not None:
        return True
      if type == self.PLAINTEXT and self.plaintext_url is not None:
        return True
    except AttributeError:
      pass
    return False
  ############################################################
  # End contains_type
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that this
  # framework implements (JSON, DB, Query, etc.)
  ############################################################
  def benchmark(self, logger=log):
    # JSON
    if self.runTests[self.JSON]:
      try:
        logger.info("BENCHMARKING JSON ... ")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.JSON)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.JSON)
        self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        pass
    # DB
    if self.runTests[self.DB]:
      try:
        logger.info("BENCHMARKING DB ... ")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.DB)
        warning_file = self.benchmarker.warning_file(self.name, self.DB)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.db_url_warn:
          with open(warning_file, 'w'):
            pass
        if self.db_url_passed:
          remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.DB)
        self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        logger.info("BENCHMARKING Query ...")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.QUERY)
        warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.query_url_warn:
          with open(warning_file, 'w'):
            pass
        if self.query_url_passed:
          remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        pass
    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        logger.info("BENCHMARKING Fortune ... ")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.fortune_url_passed:
          remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        logger.info("BENCHMARKING Update ... ")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.UPDATE)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.update_url_passed:
          remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        pass
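
    # Note: plaintext is the only test that overrides the default wrk
    # invocation: it pipelines 16 requests per connection via pipeline.lua
    # and raises the concurrency levels to 256-16384.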
    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        logger.info("BENCHMARKING Plaintext ... ")
        results = None
        output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
        if not os.path.exists(output_file):
          # Opening the file in write mode creates the empty file.
          with open(output_file, 'w'):
            pass
        if self.plaintext_url_passed:
          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256, 1024, 4096, 16384], pipeline="16")
          self.__run_benchmark(remote_script, output_file, logger)
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
        logger.info("Complete")
      except AttributeError:
        logger.exception("Error Running Benchmark for %s", self.name)
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Re-parses the raw output file for each test type that has
  # one, and reports the results (used when re-parsing results
  # for a given timestamp).
  ############################################################
  def parse_all(self):
    log.info("parse_all")
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  # Parses the raw wrk output captured for the given test type
  # into a dict with a 'results' list, one entry per measured
  # run.
  ############################################################
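  # For reference, a measured wrk run in the raw output looks roughly like
  # this (numbers illustrative):
  #
  #   Running 15s test @ http://server:8080/json
  #     8 threads and 256 connections
  #     Thread Stats   Avg      Stdev     Max   +/- Stdev
  #       Latency   631.91us  648.59us  15.42ms   92.85%
  #       Req/Sec     5.20k     1.28k    9.62k    68.73%
  #     369671 requests in 15.00s, 55.41MB read
  #   Requests/sec:  24644.79
  #
  # Only the "Latency", "requests in", "Socket errors", and "Non-2xx" lines
  # are consumed below.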
  def __parse_test(self, test_type):
    log.info("__parse_test")
    try:
      results = dict()
      results['results'] = []
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            # "Queries:"/"Concurrency:" banners mark the start of a measured
            # run; "Warmup"/"Primer" banners mark throwaway runs.
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue
            if not is_warmup:
              if rawData is None:
                rawData = dict()
                results['results'].append(rawData)
              if "Latency" in line:
                m = re.findall(r"([0-9]+\.*[0-9]*(?:us|ms|s|m|%))", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # m[3] is the +/- stdev percentage, which we do not record
              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m is not None:
                  rawData['totalRequests'] = int(m.group(1))
              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))
              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m is not None:
                  # Stored under '5xx' even though it counts all
                  # non-2xx/3xx responses
                  rawData['5xx'] = int(m.group(1))
      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the given bash script, which
  # invokes wrk against the test. All results are written to
  # output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, logger):
    err = WrapLogger(logger, logging.ERROR)
    with open(output_file, 'w') as raw_file:
      # The script is piped over ssh to the client machine; wrk's
      # output is captured into output_file.
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
  ############################################################
  # End __run_benchmark
  ############################################################
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable-concurrency tests
  # (JSON and DB).
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable-query tests (Query).
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################
  ############################################################
  # __curl_url
  # Dumps the HTTP response and headers to the logger, then
  # fetches and returns the response body.
  ############################################################
  def __curl_url(self, url, testType, logger=log):
    # Send output to our benchmark's logger for archival to file,
    # but only show err on stdout by default
    (out, err) = WrapLogger(logger, logging.DEBUG), WrapLogger(logger, logging.ERROR)
    output = None
    try:
      # Use -m 15 to make curl stop trying after 15sec.
      # Use -i to output the response with headers.
      # Don't use -f so that the HTTP response code is ignored.
      # Use -sS to hide the progress bar, but still show errors.
      subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
      # Issue a second curl to capture just the response body and return it.
      p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
      output = p.communicate()
    except:
      pass
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  def requires_database(self):
    """Returns True if this test requires a database"""
    return (self.runTests[self.FORTUNE] or
            self.runTests[self.DB] or
            self.runTests[self.QUERY] or
            self.runTests[self.UPDATE])
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot
    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
      self.install_root = "%s/pertest/%s" % (self.install_root, name)
    self.__dict__.update(args)

    # Ensure the directory has an __init__.py file so that we can
    # import it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      open(os.path.join(directory, "__init__.py"), 'w').close()
    self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################

############################################################
# End FrameworkTest
############################################################
##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []
  # The config object can specify multiple tests; we need to loop
  # over them and parse them out
  for test in config['tests']:
    for key, value in test.iteritems():
      test_name = config['framework']

      runTests = dict()
      runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
      runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
      runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
      runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
      runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
      runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)

      # If the test uses the 'default' keyword, we don't append
      # anything to its name. All configs should have only one default.
      if key != 'default':
        # We need to use the key in the test_name
        test_name = test_name + "-" + key

      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  return tests
##############################################################
# End parse_config
##############################################################
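
# For illustration, parse_config expects a dict shaped like the project's
# benchmark_config files; field names beyond the *_url keys checked above
# are passed through to FrameworkTest via 'args'. A hypothetical config:
#
#   {
#     "framework": "example",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "plaintext_url": "/plaintext",
#         "port": 8080
#       },
#       "mysql": {
#         "setup_file": "setup_mysql",
#         "db_url": "/db",
#         "query_url": "/db?queries=",
#         "port": 8080
#       }
#     }]
#   }
#
# would yield two FrameworkTest objects named "example" and "example-mysql".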