framework_test.py 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. from setup.linux import setup_util
  3. from test_runner import TestRunner
  4. import importlib
  5. import os
  6. import subprocess
  7. import time
  8. import re
  9. import pprint
  10. import sys
  11. import traceback
  12. import json
  13. import logging
  14. log = logging.getLogger('framework_test')
  15. import inspect
  16. from utils import WrapLogger
  17. from utils import Header
class FrameworkTest:
    """
    Represents a framework test, including all types (JSON, plaintext, DB, etc)
    defined in that test. Used by Benchmarker to start, verify, benchmark, and
    stop tests. Calls into the test's setup.py as needed.

    Note: Any method in this class called from Benchmarker#__run_test is run
    inside a thread.

    Note: Many methods have a parameter 'logger' passed in from Benchmarker.
    This uses python's logging module to support writing output to both a
    file and stdout concurrently. If you wish to print something to stdout,
    regardless of the current global log level, use logger.info("Something").
    If you wish to respect the current global log level, use the logger
    defined for this class e.g. log.info("Something else"). If you would
    like to use this 'logger' with subprocess, see class WrapLogger
    """

    # wrk header fragments; {accept} is filled in per test type by the
    # script-generation helpers.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Larger header set approximating a real browser request.
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    # Accept headers for each content type under test.
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Shell script template: a short primer run, a warmup run, then one wrk
    # pass per concurrency level in {interval}. The concurrency runs also
    # pass the pipeline.lua script and a {pipeline} depth argument.
    concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5
    for c in {interval}
    do
    echo ""
    echo "---------------------------------------------------------"
    echo " Concurrency: $c for {name}"
    echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
    sleep 2
    done
    """

    # Shell script template for the query/update tests: concurrency stays
    # fixed and the query-count suffix ($c) appended to the URL varies.
    query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5
    for c in {interval}
    do
    echo ""
    echo "---------------------------------------------------------"
    echo " Queries: $c for {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
    sleep 2
    done
    """

    # Metadata describing this framework/test; populated externally
    # (presumably from the test's benchmark_config — TODO confirm).
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    os = None
    database_os = None
    display_name = None
    notes = None
    versus = None

    ############################################################
    # Test Variables
    ############################################################
    # Keys used to index self.runTests and to name output files.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"

    ##########################################################################################
    # Public Methods
    ##########################################################################################
  121. ############################################################
  122. # Validates the jsonString is a JSON object with a 'message'
  123. # key with the value "hello, world!" (case-insensitive).
  124. ############################################################
  125. def validateJson(self, jsonString, logger=log):
  126. try:
  127. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  128. if obj["message"].lower() == "hello, world!":
  129. return True
  130. except:
  131. logger.debug("Expected: %s", "{'message':'hello, world!'}")
  132. logger.debug("Got: '%s'", jsonString)
  133. pass
  134. return False
  135. ############################################################
  136. # Validates the jsonString is a JSON object that has an "id"
  137. # and a "randomNumber" key, and that both keys map to
  138. # integers.
  139. ############################################################
  140. def validateDb(self, jsonString, logger=log):
  141. try:
  142. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  143. # We are allowing the single-object array for the DB
  144. # test for now, but will likely remove this later.
  145. if type(obj) == list:
  146. obj = obj[0]
  147. # This will error out of the value could not parsed to a
  148. # float (this will work with ints, but it will turn them
  149. # into their float equivalent; i.e. "123" => 123.0)
  150. if (type(float(obj["id"])) == float and
  151. type(float(obj["randomnumber"])) == float):
  152. return True
  153. except:
  154. # logger.debug("Expected: %s", "")
  155. logger.debug("Got: %s", jsonString)
  156. pass
  157. return False
  158. def validateDbStrict(self, jsonString, logger=log):
  159. try:
  160. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  161. # This will error out of the value could not parsed to a
  162. # float (this will work with ints, but it will turn them
  163. # into their float equivalent; i.e. "123" => 123.0)
  164. if (type(float(obj["id"])) == float and
  165. type(float(obj["randomnumber"])) == float):
  166. return True
  167. except:
  168. # logger.debug("Expected: %s", "")
  169. logger.debug("Got: %s", jsonString)
  170. pass
  171. return False
  172. ############################################################
  173. # Validates the jsonString is an array with a length of
  174. # 2, that each entry in the array is a JSON object, that
  175. # each object has an "id" and a "randomNumber" key, and that
  176. # both keys map to integers.
  177. ############################################################
  178. def validateQuery(self, jsonString, logger=log):
  179. try:
  180. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  181. if (type(float(arr[0]["id"])) == float and
  182. type(float(arr[0]["randomnumber"])) == float and
  183. type(float(arr[1]["id"])) == float and
  184. type(float(arr[1]["randomnumber"])) == float):
  185. return True
  186. except:
  187. # logger.debug("Expected: %s", "")
  188. logger.debug("Got: %s", jsonString)
  189. pass
  190. return False
  191. ############################################################
  192. # Validates the jsonString is an array with a length of
  193. # 1, that each entry in the array is a JSON object, that
  194. # each object has an "id" and a "randomNumber" key, and that
  195. # both keys map to integers.
  196. ############################################################
  197. def validateQueryOneOrLess(self, jsonString, logger=log):
  198. try:
  199. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  200. if len(arr) != 1:
  201. return False
  202. for obj in arr:
  203. if (type(float(obj["id"])) != float or
  204. type(float(obj["randomnumber"])) != float or
  205. type(float(obj["id"])) != float or
  206. type(float(obj["randomnumber"])) != float):
  207. return False
  208. # By here, it's passed validation
  209. return True
  210. except:
  211. # logger.debug("Expected: %s", "")
  212. logger.debug("Got: %s", jsonString)
  213. pass
  214. return False
  215. ############################################################
  216. # Validates the jsonString is an array with a length of
  217. # 500, that each entry in the array is a JSON object, that
  218. # each object has an "id" and a "randomNumber" key, and that
  219. # both keys map to integers.
  220. ############################################################
  221. def validateQueryFiveHundredOrMore(self, jsonString, logger=log):
  222. try:
  223. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  224. if len(arr) != 500:
  225. return False
  226. for obj in arr:
  227. if (type(float(obj["id"])) != float or
  228. type(float(obj["randomnumber"])) != float or
  229. type(float(obj["id"])) != float or
  230. type(float(obj["randomnumber"])) != float):
  231. return False
  232. # By here, it's passed validation
  233. return True
  234. except:
  235. # logger.debug("Expected: %s", "")
  236. logger.debug("Got: %s", jsonString)
  237. pass
  238. return False
  239. ############################################################
  240. # Parses the given HTML string and asks a FortuneHTMLParser
  241. # whether the parsed string is a valid fortune return.
  242. ############################################################
  243. def validateFortune(self, htmlString, logger=log):
  244. try:
  245. parser = FortuneHTMLParser()
  246. parser.feed(htmlString)
  247. return parser.isValidFortune()
  248. except:
  249. # logger.debug("Expected: %s", "")
  250. logger.debug("Got: %s", htmlString)
  251. pass
  252. return False
  253. ############################################################
  254. # Validates the jsonString is an array with a length of
  255. # 2, that each entry in the array is a JSON object, that
  256. # each object has an "id" and a "randomNumber" key, and that
  257. # both keys map to integers.
  258. ############################################################
  259. def validateUpdate(self, jsonString, logger=log):
  260. try:
  261. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  262. if (type(float(arr[0]["id"])) == float and
  263. type(float(arr[0]["randomnumber"])) == float and
  264. type(float(arr[1]["id"])) == float and
  265. type(float(arr[1]["randomnumber"])) == float):
  266. return True
  267. except:
  268. # logger.debug("Expected: %s", "")
  269. logger.debug("Got: %s", jsonString)
  270. pass
  271. return False
  272. ############################################################
  273. #
  274. ############################################################
  275. def validatePlaintext(self, jsonString, logger=log):
  276. try:
  277. return jsonString.lower().strip() == "hello, world!"
  278. except:
  279. logger.debug("Expected: %s", "hello, world!")
  280. logger.debug("Got: %s", jsonString)
  281. pass
  282. return False
    ############################################################
    # start(benchmarker)
    # Start the test using its setup file
    ############################################################
    def start(self, logger=log):
        """Start this test's server.

        Loads the framework's bash profile (falling back to the shared
        benchmark profile) and exports IROOT, then either instantiates a
        TestRunner subclass found in setup.py (new format) or calls the
        module-level start() directly (old format).

        Returns the runner/setup start() result, or 1 when setup.py's
        class hierarchy cannot be inspected.
        """
        log.info("start")
        # Load profile for this installation
        profile="%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logger.warning("Framework %s does not have a bash_profile", self.name)
            profile="$FWROOT/config/benchmark_profile"
        # Export IROOT so the setup scripts can locate installed software.
        set_iroot="export IROOT=%s" % self.install_root
        setup_util.replace_environ(config=profile, command=set_iroot)
        # Determine if setup.py contains a subclass of TestRunner
        self.runner = None
        for name, obj in inspect.getmembers(self.setup_module, inspect.isclass):
            try:
                is_subclass = TestRunner.is_parent_of(obj)
            except Exception as e:
                # setup.py defines a class we cannot classify; abort with
                # a failure code rather than guessing.
                logger.critical("%s: %s", self.setup_module.__file__, e)
                return 1
            if is_subclass:
                logger.debug("Framework %s is using the new setup.py format" % self.name)
                self.runner = obj(self, self.setup_module, logger)
                return self.runner.start()
        # If not, call the start function directly (old setup.py format),
        # handing it logger-backed stdout/stderr wrappers.
        logger.warning("Framework %s is using the old setup.py format" % self.name)
        (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
        return self.setup_module.start(self.benchmarker, out, err)
    ############################################################
    # End start
    ############################################################
  315. ############################################################
  316. # stop(benchmarker)
  317. # Stops the test using it's setup file
  318. ############################################################
  319. def stop(self, logger=log):
  320. log.info("stop")
  321. # Are we using a TestRunner
  322. if self.runner:
  323. return self.runner.stop()
  324. (out, err) = WrapLogger(logger, logging.INFO), WrapLogger(logger, logging.ERROR)
  325. return self.setup_module.stop(out, err)
  326. ############################################################
  327. # End stop
  328. ############################################################
    ############################################################
    # verify_urls
    # Verifies each of the URLs for this test. This will simply
    # curl the URL and check its return payload.
    # For each url, a flag will be set on this object for whether
    # or not it passed.
    ############################################################
    def verify_urls(self, logger=log):
        """Curl every enabled test URL and record pass/warn flags.

        Sets self.<type>_url_passed (and, for DB/QUERY, the
        corresponding *_url_warn flag) as a side effect; returns None.
        Only test types enabled in self.runTests are checked.
        """
        # JSON
        if self.runTests[self.JSON]:
            logger.info(Header("VERIFYING JSON (%s)" % self.json_url))
            url = self.benchmarker.generate_url(self.json_url, self.port)
            output = self.__curl_url(url, self.JSON, logger)
            logger.info("VALIDATING JSON ... ")
            if self.validateJson(output, logger):
                self.json_url_passed = True
                logger.info("PASS")
            else:
                self.json_url_passed = False
                logger.info("FAIL")
        # DB
        if self.runTests[self.DB]:
            logger.info(Header("VERIFYING DB (%s)" % self.db_url))
            url = self.benchmarker.generate_url(self.db_url, self.port)
            output = self.__curl_url(url, self.DB, logger)
            # Lenient check decides pass/fail; the strict check only
            # decides whether to attach a warning.
            if self.validateDb(output, logger):
                self.db_url_passed = True
            else:
                self.db_url_passed = False
            if self.validateDbStrict(output, logger):
                self.db_url_warn = False
            else:
                self.db_url_warn = True
            logger.info("VALIDATING DB ... ")
            if self.db_url_passed:
                if self.db_url_warn:
                    logger.info("PASS (with warnings)")
                else:
                    logger.info("PASS")
            else:
                logger.info("FAIL")
        # Query
        if self.runTests[self.QUERY]:
            logger.info(Header("VERIFYING QUERY (%s)" % self.query_url+"2"))
            # The "?queries=2" form decides pass/fail...
            url = self.benchmarker.generate_url(self.query_url + "2", self.port)
            output = self.__curl_url(url, self.QUERY, logger)
            if self.validateQuery(output, logger):
                self.query_url_passed = True
                logger.info(self.query_url + "2 - PASS")
            else:
                self.query_url_passed = False
                logger.info(self.query_url + "2 - FAIL")
            logger.info("-----------------------------------------------------")
            # ...while the edge cases (0, non-numeric, >500) only raise a
            # warning: each should clamp to the valid 1..500 range.
            self.query_url_warn = False
            url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
            output2 = self.__curl_url(url2, self.QUERY, logger)
            if not self.validateQueryOneOrLess(output2, logger):
                self.query_url_warn = True
                logger.info(self.query_url + "0 - WARNING")
            else:
                logger.info(self.query_url + "0 - PASS")
            logger.info("-----------------------------------------------------")
            url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
            output3 = self.__curl_url(url3, self.QUERY, logger)
            if not self.validateQueryOneOrLess(output3, logger):
                self.query_url_warn = True
                logger.info(self.query_url + "foo - WARNING")
            else:
                logger.info(self.query_url + "foo - PASS")
            logger.info("-----------------------------------------------------")
            url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
            output4 = self.__curl_url(url4, self.QUERY, logger)
            if not self.validateQueryFiveHundredOrMore(output4, logger):
                self.query_url_warn = True
                logger.info(self.query_url + "501 - WARNING")
            else:
                logger.info(self.query_url + "501 - PASS")
            logger.info("-----------------------------------------------------")
            logger.info("VALIDATING QUERY ... ")
            if self.query_url_passed and self.query_url_warn:
                logger.info("PASS (with warnings)")
            elif self.query_url_passed:
                logger.info("PASS")
            else:
                logger.info("FAIL")
        # Fortune
        if self.runTests[self.FORTUNE]:
            logger.info(Header("VERIFYING FORTUNE (%s)" % self.fortune_url))
            url = self.benchmarker.generate_url(self.fortune_url, self.port)
            output = self.__curl_url(url, self.FORTUNE, logger)
            logger.info("VALIDATING FORTUNE ... ")
            if self.validateFortune(output, logger):
                self.fortune_url_passed = True
                logger.info("PASS")
            else:
                self.fortune_url_passed = False
                logger.info("FAIL")
        # Update
        if self.runTests[self.UPDATE]:
            logger.info(Header("VERIFYING UPDATE (%s)" % self.update_url))
            # Updates are verified with a query count of 2.
            url = self.benchmarker.generate_url(self.update_url + "2", self.port)
            output = self.__curl_url(url, self.UPDATE, logger)
            logger.info("VALIDATING UPDATE ... ")
            if self.validateUpdate(output, logger):
                self.update_url_passed = True
                logger.info("PASS")
            else:
                self.update_url_passed = False
                logger.info("FAIL")
        # plaintext
        if self.runTests[self.PLAINTEXT]:
            logger.info(Header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
            url = self.benchmarker.generate_url(self.plaintext_url, self.port)
            output = self.__curl_url(url, self.PLAINTEXT, logger)
            logger.info("VALIDATING PLAINTEXT ... ")
            if self.validatePlaintext(output, logger):
                self.plaintext_url_passed = True
                logger.info("PASS")
            else:
                self.plaintext_url_passed = False
                logger.info("FAIL")
    ############################################################
    # End verify_urls
    ############################################################
  453. ############################################################
  454. # contains_type(type)
  455. # true if this test contains an implementation of the given
  456. # test type (json, db, etc.)
  457. ############################################################
  458. def contains_type(self, type):
  459. try:
  460. if type == self.JSON and self.json_url is not None:
  461. return True
  462. if type == self.DB and self.db_url is not None:
  463. return True
  464. if type == self.QUERY and self.query_url is not None:
  465. return True
  466. if type == self.FORTUNE and self.fortune_url is not None:
  467. return True
  468. if type == self.UPDATE and self.update_url is not None:
  469. return True
  470. if type == self.PLAINTEXT and self.plaintext_url is not None:
  471. return True
  472. except AttributeError:
  473. pass
  474. return False
  475. ############################################################
  476. # End stop
  477. ############################################################
    ############################################################
    # benchmark
    # Runs the benchmark for each type of test that it implements
    # JSON/DB/Query.
    ############################################################
    def benchmark(self, logger=log):
        """Run the wrk benchmark for every enabled test type.

        For each type: ensure the raw output file exists, touch a
        warning marker file (DB/QUERY only) when verification produced
        warnings, run the generated remote wrk script if verification
        passed, then parse the raw output and report results to the
        benchmarker. An AttributeError (e.g. a *_url_passed flag never
        set because verification did not run) skips that test type.
        """
        # JSON
        if self.runTests[self.JSON]:
            try:
                logger.info("BENCHMARKING JSON ... ")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.JSON)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                if self.json_url_passed:
                    remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
                    self.__run_benchmark(remote_script, output_file, logger)
                # Parse and report even when the run was skipped, so a
                # (possibly empty) result entry is always recorded.
                results = self.__parse_test(self.JSON)
                self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
                logger.info("Complete")
            except AttributeError:
                pass
        # DB
        if self.runTests[self.DB]:
            try:
                logger.info("BENCHMARKING DB ... ")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.DB)
                warning_file = self.benchmarker.warning_file(self.name, self.DB)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                # An empty warning file marks that verification passed
                # only leniently (strict DB validation failed).
                if self.db_url_warn:
                    with open(warning_file, 'w'):
                        pass
                if self.db_url_passed:
                    remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
                    self.__run_benchmark(remote_script, output_file, logger)
                results = self.__parse_test(self.DB)
                self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
                logger.info("Complete")
            except AttributeError:
                pass
        # Query
        if self.runTests[self.QUERY]:
            try:
                logger.info("BENCHMARKING Query ...")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.QUERY)
                warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                if self.query_url_warn:
                    with open(warning_file, 'w'):
                        pass
                if self.query_url_passed:
                    # Query uses the query-count script, not the concurrency one.
                    remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
                    self.__run_benchmark(remote_script, output_file, logger)
                results = self.__parse_test(self.QUERY)
                self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
                logger.info("Complete")
            except AttributeError:
                pass
        # fortune
        if self.runTests[self.FORTUNE]:
            try:
                logger.info("BENCHMARKING Fortune ... ")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                if self.fortune_url_passed:
                    # Fortune is an HTML endpoint, so request text/html.
                    remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
                    self.__run_benchmark(remote_script, output_file, logger)
                results = self.__parse_test(self.FORTUNE)
                self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
                logger.info("Complete")
            except AttributeError:
                pass
        # update
        if self.runTests[self.UPDATE]:
            try:
                logger.info("BENCHMARKING Update ... ")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.UPDATE)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                if self.update_url_passed:
                    # Update also varies the query count, like QUERY.
                    remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
                    self.__run_benchmark(remote_script, output_file, logger)
                results = self.__parse_test(self.UPDATE)
                self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
                logger.info( "Complete" )
            except AttributeError:
                pass
        # plaintext
        if self.runTests[self.PLAINTEXT]:
            try:
                logger.info("BENCHMARKING Plaintext ... ")
                results = None
                output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
                if not os.path.exists(output_file):
                    with open(output_file, 'w'):
                        # Simply opening the file in write mode should create the empty file.
                        pass
                if self.plaintext_url_passed:
                    # Plaintext uses wider concurrency levels and HTTP pipelining.
                    remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
                    self.__run_benchmark(remote_script, output_file, logger)
                results = self.__parse_test(self.PLAINTEXT)
                self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
                logger.info( "Complete" )
            except AttributeError:
                # Unlike the other sections, plaintext logs the failure.
                logger.exception("Error Running Benchmark for %s", self.name)
                pass
    ############################################################
    # End benchmark
    ############################################################
  604. ############################################################
  605. # parse_all
  606. # Method meant to be run for a given timestamp
  607. ############################################################
  608. def parse_all(self):
  609. log.info("parse_all")
  610. # JSON
  611. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  612. results = self.__parse_test(self.JSON)
  613. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  614. # DB
  615. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  616. results = self.__parse_test(self.DB)
  617. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  618. # Query
  619. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  620. results = self.__parse_test(self.QUERY)
  621. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  622. # Fortune
  623. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  624. results = self.__parse_test(self.FORTUNE)
  625. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  626. # Update
  627. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  628. results = self.__parse_test(self.UPDATE)
  629. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  630. # Plaintext
  631. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  632. results = self.__parse_test(self.PLAINTEXT)
  633. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  634. ############################################################
  635. # End parse_all
  636. ############################################################
  637. ############################################################
  638. # __parse_test(test_type)
  639. ############################################################
  640. def __parse_test(self, test_type):
  641. log.info("__parse_test")
  642. try:
  643. results = dict()
  644. results['results'] = []
  645. if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
  646. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  647. is_warmup = True
  648. rawData = None
  649. for line in raw_data:
  650. if "Queries:" in line or "Concurrency:" in line:
  651. is_warmup = False
  652. rawData = None
  653. continue
  654. if "Warmup" in line or "Primer" in line:
  655. is_warmup = True
  656. continue
  657. if not is_warmup:
  658. if rawData == None:
  659. rawData = dict()
  660. results['results'].append(rawData)
  661. #if "Requests/sec:" in line:
  662. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  663. # rawData['reportedResults'] = m.group(1)
  664. # search for weighttp data such as succeeded and failed.
  665. if "Latency" in line:
  666. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  667. if len(m) == 4:
  668. rawData['latencyAvg'] = m[0]
  669. rawData['latencyStdev'] = m[1]
  670. rawData['latencyMax'] = m[2]
  671. # rawData['latencyStdevPercent'] = m[3]
  672. #if "Req/Sec" in line:
  673. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  674. # if len(m) == 4:
  675. # rawData['requestsAvg'] = m[0]
  676. # rawData['requestsStdev'] = m[1]
  677. # rawData['requestsMax'] = m[2]
  678. # rawData['requestsStdevPercent'] = m[3]
  679. #if "requests in" in line:
  680. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  681. # if m != None:
  682. # # parse out the raw time, which may be in minutes or seconds
  683. # raw_time = m.group(1)
  684. # if "ms" in raw_time:
  685. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  686. # elif "s" in raw_time:
  687. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  688. # elif "m" in raw_time:
  689. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  690. # elif "h" in raw_time:
  691. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  692. if "requests in" in line:
  693. m = re.search("([0-9]+) requests in", line)
  694. if m != None:
  695. rawData['totalRequests'] = int(m.group(1))
  696. if "Socket errors" in line:
  697. if "connect" in line:
  698. m = re.search("connect ([0-9]+)", line)
  699. rawData['connect'] = int(m.group(1))
  700. if "read" in line:
  701. m = re.search("read ([0-9]+)", line)
  702. rawData['read'] = int(m.group(1))
  703. if "write" in line:
  704. m = re.search("write ([0-9]+)", line)
  705. rawData['write'] = int(m.group(1))
  706. if "timeout" in line:
  707. m = re.search("timeout ([0-9]+)", line)
  708. rawData['timeout'] = int(m.group(1))
  709. if "Non-2xx" in line:
  710. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  711. if m != None:
  712. rawData['5xx'] = int(m.group(1))
  713. return results
  714. except IOError:
  715. return None
  716. ############################################################
# End __parse_test
  718. ############################################################
  719. ##########################################################################################
  720. # Private Methods
  721. ##########################################################################################
  722. ############################################################
  723. # __run_benchmark(script, output_file)
  724. # Runs a single benchmark using the script which is a bash
# template that uses wrk to run the test. All the results
# are written to the output_file.
  727. ############################################################
  728. def __run_benchmark(self, script, output_file, logger):
  729. err = WrapLogger(logger, logging.ERROR)
  730. with open(output_file, 'w') as raw_file:
  731. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  732. p.communicate(script)
  733. ############################################################
  734. # End __run_benchmark
  735. ############################################################
  736. ############################################################
  737. # __generate_concurrency_script(url, port)
  738. # Generates the string containing the bash script that will
  739. # be run on the client to benchmark a single test. This
  740. # specifically works for the variable concurrency tests (JSON
  741. # and DB)
  742. ############################################################
  743. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  744. if len(intervals) == 0:
  745. intervals = self.benchmarker.concurrency_levels
  746. headers = self.__get_request_headers(accept_header)
  747. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  748. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  749. interval=" ".join("{}".format(item) for item in intervals),
  750. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  751. pipeline=pipeline)
  752. ############################################################
  753. # End __generate_concurrency_script
  754. ############################################################
  755. ############################################################
  756. # __generate_query_script(url, port)
  757. # Generates the string containing the bash script that will
  758. # be run on the client to benchmark a single test. This
  759. # specifically works for the variable query tests (Query)
  760. ############################################################
  761. def __generate_query_script(self, url, port, accept_header):
  762. headers = self.__get_request_headers(accept_header)
  763. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  764. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  765. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  766. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  767. ############################################################
  768. # End __generate_query_script
  769. ############################################################
  770. ############################################################
  771. # __get_request_headers(accept_header)
  772. # Generates the complete HTTP header string
  773. ############################################################
  774. def __get_request_headers(self, accept_header):
  775. return self.headers_template.format(accept=accept_header)
  776. ############################################################
# End __get_request_headers
  778. ############################################################
  779. ############################################################
  780. # __curl_url
  781. # Dump HTTP response and headers. Throw exception if there
  782. # is an HTTP error.
  783. ############################################################
  784. def __curl_url(self, url, testType, logger=log):
  785. # Send output to our benchmark's logger for archival to file,
  786. # but only show err on stdout by default
  787. (out, err) = WrapLogger(logger, logging.DEBUG), WrapLogger(logger, logging.ERROR)
  788. output = None
  789. try:
  790. # Use -m 15 to make curl stop trying after 15sec.
  791. # Use -i to output response with headers.
  792. # Don't use -f so that the HTTP response code is ignored.
  793. # Use --stderr - to redirect stderr to stdout so we get
  794. # error output for sure in stdout.
  795. # Use -sS to hide progress bar, but show errors.
  796. subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
  797. # We need to get the respond body from the curl and return it.
  798. p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
  799. output = p.communicate()
  800. except:
  801. pass
  802. if output:
  803. # We have the response body - return it
  804. return output[0]
  805. ##############################################################
  806. # End __curl_url
  807. ##############################################################
  808. def requires_database(self):
  809. """Returns True/False if this test requires a database"""
  810. return (self.runTests[self.FORTUNE] or
  811. self.runTests[self.DB] or
  812. self.runTests[self.QUERY] or
  813. self.runTests[self.UPDATE])
  814. ##########################################################################################
  815. # Constructor
  816. ##########################################################################################
  817. def __init__(self, name, directory, benchmarker, runTests, args):
  818. self.name = name
  819. self.directory = directory
  820. self.benchmarker = benchmarker
  821. self.runTests = runTests
  822. self.fwroot = benchmarker.fwroot
  823. self.install_root="%s/%s" % (self.fwroot, "installs")
  824. if benchmarker.install_strategy is 'pertest':
  825. self.install_root="%s/pertest/%s" % (self.install_root, name)
  826. self.__dict__.update(args)
  827. # ensure directory has __init__.py file so that we can use it as a Python package
  828. if not os.path.exists(os.path.join(directory, "__init__.py")):
  829. open(os.path.join(directory, "__init__.py"), 'w').close()
  830. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  831. ############################################################
  832. # End __init__
  833. ############################################################
  834. ############################################################
  835. # End FrameworkTest
  836. ############################################################
  837. ##########################################################################################
  838. # Static methods
  839. ##########################################################################################
  840. ##############################################################
  841. # parse_config(config, directory, benchmarker)
  842. # parses a config file and returns a list of FrameworkTest
  843. # objects based on that config file.
  844. ##############################################################
  845. def parse_config(config, directory, benchmarker):
  846. tests = []
  847. # The config object can specify multiple tests, we neep to loop
  848. # over them and parse them out
  849. for test in config['tests']:
  850. for key, value in test.iteritems():
  851. test_name = config['framework']
  852. runTests = dict()
  853. runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
  854. runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
  855. runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
  856. runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
  857. runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
  858. runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)
  859. # if the test uses the 'defualt' keywork, then we don't
  860. # append anything to it's name. All configs should only have 1 default
  861. if key != 'default':
  862. # we need to use the key in the test_name
  863. test_name = test_name + "-" + key
  864. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  865. return tests
  866. ##############################################################
  867. # End parse_config
  868. ##############################################################