# framework_test.py
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. from setup.linux import setup_util
  3. import importlib
  4. import os
  5. import subprocess
  6. import time
  7. import re
  8. import pprint
  9. import sys
  10. import traceback
  11. import json
  12. import textwrap
  13. import logging
  14. log = logging.getLogger('framework_test')
class FrameworkTest:
    ##########################################################################################
    # Class variables
    ##########################################################################################

    # Minimal header set passed to the load generator; {accept} is filled in
    # with one of the accept_* strings below.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Fuller, browser-like header set (user agent, cookies, language).
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    # Accept headers for the three response flavours exercised by the suite.
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Shell-script template for concurrency-style tests: a short primer run,
    # a warmup run, then one {wrk} run per concurrency level in {interval}.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
sleep 2
done
"""

    # Shell-script template for query-count tests: same primer/warmup shape,
    # but the loop varies the query count appended to {url} instead of the
    # client concurrency.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
sleep 2
done
"""

    # Test metadata — presumably populated from each framework's benchmark
    # config by the loader elsewhere in this file (TODO confirm).
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    # NOTE: this attribute shadows the `os` module name inside the class
    # body only; methods still resolve the module at global scope.
    os = None
    database_os = None
    display_name = None
    notes = None
    versus = None

    ############################################################
    # Test Variables
    ############################################################
    # Canonical test-type keys, used to index runTests and to label results.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"

    ##########################################################################################
    # Public Methods
    ##########################################################################################
  107. ############################################################
  108. # Validates the jsonString is a JSON object with a 'message'
  109. # key with the value "hello, world!" (case-insensitive).
  110. ############################################################
  111. def validateJson(self, jsonString, out, err):
  112. try:
  113. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  114. if obj["message"].lower() == "hello, world!":
  115. return True
  116. except:
  117. pass
  118. return False
  119. ############################################################
  120. # Validates the jsonString is a JSON object that has an "id"
  121. # and a "randomNumber" key, and that both keys map to
  122. # integers.
  123. ############################################################
  124. def validateDb(self, jsonString, out, err):
  125. try:
  126. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  127. # We are allowing the single-object array for the DB
  128. # test for now, but will likely remove this later.
  129. if type(obj) == list:
  130. obj = obj[0]
  131. # This will error out of the value could not parsed to a
  132. # float (this will work with ints, but it will turn them
  133. # into their float equivalent; i.e. "123" => 123.0)
  134. if (type(float(obj["id"])) == float and
  135. type(float(obj["randomnumber"])) == float):
  136. return True
  137. except:
  138. pass
  139. return False
  140. def validateDbStrict(self, jsonString, out, err):
  141. try:
  142. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  143. # This will error out of the value could not parsed to a
  144. # float (this will work with ints, but it will turn them
  145. # into their float equivalent; i.e. "123" => 123.0)
  146. if (type(float(obj["id"])) == float and
  147. type(float(obj["randomnumber"])) == float):
  148. return True
  149. except:
  150. pass
  151. return False
  152. ############################################################
  153. # Validates the jsonString is an array with a length of
  154. # 2, that each entry in the array is a JSON object, that
  155. # each object has an "id" and a "randomNumber" key, and that
  156. # both keys map to integers.
  157. ############################################################
  158. def validateQuery(self, jsonString, out, err):
  159. try:
  160. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  161. if (type(float(arr[0]["id"])) == float and
  162. type(float(arr[0]["randomnumber"])) == float and
  163. type(float(arr[1]["id"])) == float and
  164. type(float(arr[1]["randomnumber"])) == float):
  165. return True
  166. except:
  167. pass
  168. return False
  169. ############################################################
  170. # Validates the jsonString is an array with a length of
  171. # 1, that each entry in the array is a JSON object, that
  172. # each object has an "id" and a "randomNumber" key, and that
  173. # both keys map to integers.
  174. ############################################################
  175. def validateQueryOneOrLess(self, jsonString, out, err):
  176. try:
  177. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  178. if len(arr) != 1:
  179. return False
  180. for obj in arr:
  181. if (type(float(obj["id"])) != float or
  182. type(float(obj["randomnumber"])) != float or
  183. type(float(obj["id"])) != float or
  184. type(float(obj["randomnumber"])) != float):
  185. return False
  186. # By here, it's passed validation
  187. return True
  188. except:
  189. pass
  190. return False
  191. ############################################################
  192. # Validates the jsonString is an array with a length of
  193. # 500, that each entry in the array is a JSON object, that
  194. # each object has an "id" and a "randomNumber" key, and that
  195. # both keys map to integers.
  196. ############################################################
  197. def validateQueryFiveHundredOrMore(self, jsonString, out, err):
  198. try:
  199. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  200. if len(arr) != 500:
  201. return False
  202. for obj in arr:
  203. if (type(float(obj["id"])) != float or
  204. type(float(obj["randomnumber"])) != float or
  205. type(float(obj["id"])) != float or
  206. type(float(obj["randomnumber"])) != float):
  207. return False
  208. # By here, it's passed validation
  209. return True
  210. except:
  211. pass
  212. return False
  213. ############################################################
  214. # Parses the given HTML string and asks a FortuneHTMLParser
  215. # whether the parsed string is a valid fortune return.
  216. ############################################################
  217. def validateFortune(self, htmlString, out, err):
  218. try:
  219. parser = FortuneHTMLParser()
  220. parser.feed(htmlString)
  221. return parser.isValidFortune()
  222. except:
  223. pass
  224. return False
  225. ############################################################
  226. # Validates the jsonString is an array with a length of
  227. # 2, that each entry in the array is a JSON object, that
  228. # each object has an "id" and a "randomNumber" key, and that
  229. # both keys map to integers.
  230. ############################################################
  231. def validateUpdate(self, jsonString, out, err):
  232. try:
  233. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  234. if (type(float(arr[0]["id"])) == float and
  235. type(float(arr[0]["randomnumber"])) == float and
  236. type(float(arr[1]["id"])) == float and
  237. type(float(arr[1]["randomnumber"])) == float):
  238. return True
  239. except:
  240. pass
  241. return False
  242. ############################################################
  243. #
  244. ############################################################
  245. def validatePlaintext(self, jsonString, out, err):
  246. try:
  247. return jsonString.lower().strip() == "hello, world!"
  248. except:
  249. pass
  250. return False
  251. ############################################################
  252. # start(benchmarker)
  253. # Start the test using it's setup file
  254. ############################################################
  255. def start(self, logger=log):
  256. log.info("start")
  257. # Load profile for this installation
  258. profile="%s/bash_profile.sh" % self.directory
  259. if not os.path.exists(profile):
  260. logging.warning("Framework %s does not have a bash_profile" % self.name)
  261. profile="$FWROOT/config/benchmark_profile"
  262. set_iroot="export IROOT=%s" % self.install_root
  263. setup_util.replace_environ(config=profile, command=set_iroot)
  264. (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
  265. return self.setup_module.start(self.benchmarker, out, err)
  266. ############################################################
  267. # End start
  268. ############################################################
  269. ############################################################
  270. # stop(benchmarker)
  271. # Stops the test using it's setup file
  272. ############################################################
  273. def stop(self, logger=log):
  274. logger.info("stop")
  275. (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
  276. return self.setup_module.stop(out, err)
  277. ############################################################
  278. # End stop
  279. ############################################################
  280. ############################################################
  281. # verify_urls
  282. # Verifys each of the URLs for this test. THis will sinply
  283. # curl the URL and check for it's return status.
  284. # For each url, a flag will be set on this object for whether
  285. # or not it passed
  286. ############################################################
  287. def verify_urls(self, logger=log):
  288. (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
  289. # JSON
  290. if self.runTests[self.JSON]:
  291. out.write(textwrap.dedent("""
  292. -----------------------------------------------------
  293. VERIFYING JSON ({url})
  294. -----------------------------------------------------
  295. """.format(url = self.json_url)))
  296. out.flush()
  297. url = self.benchmarker.generate_url(self.json_url, self.port)
  298. output = self.__curl_url(url, self.JSON, out, err)
  299. out.write("VALIDATING JSON ... ")
  300. if self.validateJson(output, out, err):
  301. self.json_url_passed = True
  302. out.write("PASS\n\n")
  303. else:
  304. self.json_url_passed = False
  305. out.write("FAIL\n\n")
  306. out.flush
  307. # DB
  308. if self.runTests[self.DB]:
  309. out.write(textwrap.dedent("""
  310. -----------------------------------------------------
  311. VERIFYING DB ({url})
  312. -----------------------------------------------------
  313. """.format(url = self.db_url)))
  314. out.flush()
  315. url = self.benchmarker.generate_url(self.db_url, self.port)
  316. output = self.__curl_url(url, self.DB, out, err)
  317. if self.validateDb(output, out, err):
  318. self.db_url_passed = True
  319. else:
  320. self.db_url_passed = False
  321. if self.validateDbStrict(output, out, err):
  322. self.db_url_warn = False
  323. else:
  324. self.db_url_warn = True
  325. out.write("VALIDATING DB ... ")
  326. if self.db_url_passed:
  327. out.write("PASS")
  328. if self.db_url_warn:
  329. out.write(" (with warnings)")
  330. out.write("\n\n")
  331. else:
  332. out.write("FAIL\n\n")
  333. out.flush
  334. # Query
  335. if self.runTests[self.QUERY]:
  336. out.write(textwrap.dedent("""
  337. -----------------------------------------------------
  338. VERIFYING QUERY ({url})
  339. -----------------------------------------------------
  340. """.format(url=self.query_url+"2")))
  341. out.flush()
  342. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  343. output = self.__curl_url(url, self.QUERY, out, err)
  344. if self.validateQuery(output, out, err):
  345. self.query_url_passed = True
  346. out.write(self.query_url + "2 - PASS\n\n")
  347. else:
  348. self.query_url_passed = False
  349. out.write(self.query_url + "2 - FAIL\n\n")
  350. out.write("-----------------------------------------------------\n\n")
  351. out.flush()
  352. self.query_url_warn = False
  353. url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
  354. output2 = self.__curl_url(url2, self.QUERY, out, err)
  355. if not self.validateQueryOneOrLess(output2, out, err):
  356. self.query_url_warn = True
  357. out.write(self.query_url + "0 - WARNING\n\n")
  358. else:
  359. out.write(self.query_url + "0 - PASS\n\n")
  360. out.write("-----------------------------------------------------\n\n")
  361. out.flush()
  362. url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
  363. output3 = self.__curl_url(url3, self.QUERY, out, err)
  364. if not self.validateQueryOneOrLess(output3, out, err):
  365. self.query_url_warn = True
  366. out.write(self.query_url + "foo - WARNING\n\n")
  367. else:
  368. out.write(self.query_url + "foo - PASS\n\n")
  369. out.write("-----------------------------------------------------\n\n")
  370. out.flush()
  371. url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
  372. output4 = self.__curl_url(url4, self.QUERY, out, err)
  373. if not self.validateQueryFiveHundredOrMore(output4, out, err):
  374. self.query_url_warn = True
  375. out.write(self.query_url + "501 - WARNING\n\n")
  376. else:
  377. out.write(self.query_url + "501 - PASS\n\n")
  378. out.write("-----------------------------------------------------\n\n\n")
  379. out.flush()
  380. out.write("VALIDATING QUERY ... ")
  381. if self.query_url_passed:
  382. out.write("PASS")
  383. if self.query_url_warn:
  384. out.write(" (with warnings)")
  385. out.write("\n\n")
  386. else:
  387. out.write("FAIL\n\n")
  388. out.flush
  389. # Fortune
  390. if self.runTests[self.FORTUNE]:
  391. out.write(textwrap.dedent("""
  392. -----------------------------------------------------
  393. VERIFYING FORTUNE ({url})
  394. -----------------------------------------------------
  395. """.format(url = self.fortune_url)))
  396. out.flush()
  397. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  398. output = self.__curl_url(url, self.FORTUNE, out, err)
  399. out.write("VALIDATING FORTUNE ... ")
  400. if self.validateFortune(output, out, err):
  401. self.fortune_url_passed = True
  402. out.write("PASS\n\n")
  403. else:
  404. self.fortune_url_passed = False
  405. out.write("FAIL\n\n")
  406. out.flush
  407. # Update
  408. if self.runTests[self.UPDATE]:
  409. out.write(textwrap.dedent("""
  410. -----------------------------------------------------
  411. VERIFYING UPDATE ({url})
  412. -----------------------------------------------------
  413. """.format(url = self.update_url)))
  414. out.flush()
  415. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  416. output = self.__curl_url(url, self.UPDATE, out, err)
  417. out.write("VALIDATING UPDATE ... ")
  418. if self.validateUpdate(output, out, err):
  419. self.update_url_passed = True
  420. out.write("PASS\n\n")
  421. else:
  422. self.update_url_passed = False
  423. out.write("FAIL\n\n")
  424. out.flush
  425. # plaintext
  426. if self.runTests[self.PLAINTEXT]:
  427. out.write(textwrap.dedent("""
  428. -----------------------------------------------------
  429. VERIFYING PLAINTEXT ({url})
  430. -----------------------------------------------------
  431. """.format(url = self.plaintext_url)))
  432. out.flush()
  433. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  434. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  435. out.write("VALIDATING PLAINTEXT ... ")
  436. if self.validatePlaintext(output, out, err):
  437. self.plaintext_url_passed = True
  438. out.write("PASS\n\n")
  439. else:
  440. self.plaintext_url_passed = False
  441. out.write("FAIL\n\n")
  442. out.flush
  443. ############################################################
  444. # End verify_urls
  445. ############################################################
  446. ############################################################
  447. # contains_type(type)
  448. # true if this test contains an implementation of the given
  449. # test type (json, db, etc.)
  450. ############################################################
  451. def contains_type(self, type):
  452. try:
  453. if type == self.JSON and self.json_url is not None:
  454. return True
  455. if type == self.DB and self.db_url is not None:
  456. return True
  457. if type == self.QUERY and self.query_url is not None:
  458. return True
  459. if type == self.FORTUNE and self.fortune_url is not None:
  460. return True
  461. if type == self.UPDATE and self.update_url is not None:
  462. return True
  463. if type == self.PLAINTEXT and self.plaintext_url is not None:
  464. return True
  465. except AttributeError:
  466. pass
  467. return False
  468. ############################################################
  469. # End stop
  470. ############################################################
  471. ############################################################
  472. # benchmark
  473. # Runs the benchmark for each type of test that it implements
  474. # JSON/DB/Query.
  475. ############################################################
  476. def benchmark(self, logger=log):
  477. (out, err) = WrapLogger(logger, 'out'), WrapLogger(logger, 'err')
  478. # JSON
  479. if self.runTests[self.JSON]:
  480. try:
  481. out.write("BENCHMARKING JSON ... ")
  482. out.flush()
  483. results = None
  484. output_file = self.benchmarker.output_file(self.name, self.JSON)
  485. if not os.path.exists(output_file):
  486. with open(output_file, 'w'):
  487. # Simply opening the file in write mode should create the empty file.
  488. pass
  489. if self.json_url_passed:
  490. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  491. self.__run_benchmark(remote_script, output_file, err)
  492. results = self.__parse_test(self.JSON)
  493. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  494. out.write( "Complete\n" )
  495. out.flush()
  496. except AttributeError:
  497. pass
  498. # DB
  499. if self.runTests[self.DB]:
  500. try:
  501. out.write("BENCHMARKING DB ... ")
  502. out.flush()
  503. results = None
  504. output_file = self.benchmarker.output_file(self.name, self.DB)
  505. warning_file = self.benchmarker.warning_file(self.name, self.DB)
  506. if not os.path.exists(output_file):
  507. with open(output_file, 'w'):
  508. # Simply opening the file in write mode should create the empty file.
  509. pass
  510. if self.db_url_warn:
  511. with open(warning_file, 'w'):
  512. pass
  513. if self.db_url_passed:
  514. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  515. self.__run_benchmark(remote_script, output_file, err)
  516. results = self.__parse_test(self.DB)
  517. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  518. out.write( "Complete\n" )
  519. except AttributeError:
  520. pass
  521. # Query
  522. if self.runTests[self.QUERY]:
  523. try:
  524. out.write("BENCHMARKING Query ... ")
  525. out.flush()
  526. results = None
  527. output_file = self.benchmarker.output_file(self.name, self.QUERY)
  528. warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
  529. if not os.path.exists(output_file):
  530. with open(output_file, 'w'):
  531. # Simply opening the file in write mode should create the empty file.
  532. pass
  533. if self.query_url_warn:
  534. with open(warning_file, 'w'):
  535. pass
  536. if self.query_url_passed:
  537. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  538. self.__run_benchmark(remote_script, output_file, err)
  539. results = self.__parse_test(self.QUERY)
  540. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  541. out.write( "Complete\n" )
  542. out.flush()
  543. except AttributeError:
  544. pass
  545. # fortune
  546. if self.runTests[self.FORTUNE]:
  547. try:
  548. out.write("BENCHMARKING Fortune ... ")
  549. out.flush()
  550. results = None
  551. output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
  552. if not os.path.exists(output_file):
  553. with open(output_file, 'w'):
  554. # Simply opening the file in write mode should create the empty file.
  555. pass
  556. if self.fortune_url_passed:
  557. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  558. self.__run_benchmark(remote_script, output_file, err)
  559. results = self.__parse_test(self.FORTUNE)
  560. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  561. out.write( "Complete\n" )
  562. out.flush()
  563. except AttributeError:
  564. pass
  565. # update
  566. if self.runTests[self.UPDATE]:
  567. try:
  568. out.write("BENCHMARKING Update ... ")
  569. out.flush()
  570. results = None
  571. output_file = self.benchmarker.output_file(self.name, self.UPDATE)
  572. if not os.path.exists(output_file):
  573. with open(output_file, 'w'):
  574. # Simply opening the file in write mode should create the empty file.
  575. pass
  576. if self.update_url_passed:
  577. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  578. self.__run_benchmark(remote_script, output_file, err)
  579. results = self.__parse_test(self.UPDATE)
  580. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  581. out.write( "Complete\n" )
  582. out.flush()
  583. except AttributeError:
  584. pass
  585. # plaintext
  586. if self.runTests[self.PLAINTEXT]:
  587. try:
  588. out.write("BENCHMARKING Plaintext ... ")
  589. out.flush()
  590. results = None
  591. output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
  592. if not os.path.exists(output_file):
  593. with open(output_file, 'w'):
  594. # Simply opening the file in write mode should create the empty file.
  595. pass
  596. if self.plaintext_url_passed:
  597. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
  598. self.__run_benchmark(remote_script, output_file, err)
  599. results = self.__parse_test(self.PLAINTEXT)
  600. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  601. out.write( "Complete\n" )
  602. out.flush()
  603. except AttributeError:
  604. traceback.print_exc()
  605. pass
  606. ############################################################
  607. # End benchmark
  608. ############################################################
  609. ############################################################
  610. # parse_all
  611. # Method meant to be run for a given timestamp
  612. ############################################################
  613. def parse_all(self):
  614. log.info("parse_all")
  615. # JSON
  616. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  617. results = self.__parse_test(self.JSON)
  618. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  619. # DB
  620. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  621. results = self.__parse_test(self.DB)
  622. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  623. # Query
  624. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  625. results = self.__parse_test(self.QUERY)
  626. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  627. # Fortune
  628. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  629. results = self.__parse_test(self.FORTUNE)
  630. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  631. # Update
  632. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  633. results = self.__parse_test(self.UPDATE)
  634. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  635. # Plaintext
  636. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  637. results = self.__parse_test(self.PLAINTEXT)
  638. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  639. ############################################################
  640. # End parse_all
  641. ############################################################
  642. ############################################################
  643. # __parse_test(test_type)
  644. ############################################################
  645. def __parse_test(self, test_type):
  646. log.info("__parse_test")
  647. try:
  648. results = dict()
  649. results['results'] = []
  650. if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
  651. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  652. is_warmup = True
  653. rawData = None
  654. for line in raw_data:
  655. if "Queries:" in line or "Concurrency:" in line:
  656. is_warmup = False
  657. rawData = None
  658. continue
  659. if "Warmup" in line or "Primer" in line:
  660. is_warmup = True
  661. continue
  662. if not is_warmup:
  663. if rawData == None:
  664. rawData = dict()
  665. results['results'].append(rawData)
  666. #if "Requests/sec:" in line:
  667. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  668. # rawData['reportedResults'] = m.group(1)
  669. # search for weighttp data such as succeeded and failed.
  670. if "Latency" in line:
  671. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  672. if len(m) == 4:
  673. rawData['latencyAvg'] = m[0]
  674. rawData['latencyStdev'] = m[1]
  675. rawData['latencyMax'] = m[2]
  676. # rawData['latencyStdevPercent'] = m[3]
  677. #if "Req/Sec" in line:
  678. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  679. # if len(m) == 4:
  680. # rawData['requestsAvg'] = m[0]
  681. # rawData['requestsStdev'] = m[1]
  682. # rawData['requestsMax'] = m[2]
  683. # rawData['requestsStdevPercent'] = m[3]
  684. #if "requests in" in line:
  685. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  686. # if m != None:
  687. # # parse out the raw time, which may be in minutes or seconds
  688. # raw_time = m.group(1)
  689. # if "ms" in raw_time:
  690. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  691. # elif "s" in raw_time:
  692. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  693. # elif "m" in raw_time:
  694. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  695. # elif "h" in raw_time:
  696. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  697. if "requests in" in line:
  698. m = re.search("([0-9]+) requests in", line)
  699. if m != None:
  700. rawData['totalRequests'] = int(m.group(1))
  701. if "Socket errors" in line:
  702. if "connect" in line:
  703. m = re.search("connect ([0-9]+)", line)
  704. rawData['connect'] = int(m.group(1))
  705. if "read" in line:
  706. m = re.search("read ([0-9]+)", line)
  707. rawData['read'] = int(m.group(1))
  708. if "write" in line:
  709. m = re.search("write ([0-9]+)", line)
  710. rawData['write'] = int(m.group(1))
  711. if "timeout" in line:
  712. m = re.search("timeout ([0-9]+)", line)
  713. rawData['timeout'] = int(m.group(1))
  714. if "Non-2xx" in line:
  715. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  716. if m != None:
  717. rawData['5xx'] = int(m.group(1))
  718. return results
  719. except IOError:
  720. return None
  721. ############################################################
  722. # End benchmark
  723. ############################################################
  724. ##########################################################################################
  725. # Private Methods
  726. ##########################################################################################
  727. ############################################################
  728. # __run_benchmark(script, output_file)
# Runs a single benchmark using the script, which is a bash
# template that uses weighttp to run the test. All the results
# are output to the output_file.
  732. ############################################################
  733. def __run_benchmark(self, script, output_file, err):
  734. with open(output_file, 'w') as raw_file:
  735. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  736. p.communicate(script)
  737. err.flush()
  738. ############################################################
  739. # End __run_benchmark
  740. ############################################################
  741. ############################################################
  742. # __generate_concurrency_script(url, port)
  743. # Generates the string containing the bash script that will
  744. # be run on the client to benchmark a single test. This
  745. # specifically works for the variable concurrency tests (JSON
  746. # and DB)
  747. ############################################################
  748. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  749. if len(intervals) == 0:
  750. intervals = self.benchmarker.concurrency_levels
  751. headers = self.__get_request_headers(accept_header)
  752. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  753. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  754. interval=" ".join("{}".format(item) for item in intervals),
  755. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  756. pipeline=pipeline)
  757. ############################################################
  758. # End __generate_concurrency_script
  759. ############################################################
  760. ############################################################
  761. # __generate_query_script(url, port)
  762. # Generates the string containing the bash script that will
  763. # be run on the client to benchmark a single test. This
  764. # specifically works for the variable query tests (Query)
  765. ############################################################
  766. def __generate_query_script(self, url, port, accept_header):
  767. headers = self.__get_request_headers(accept_header)
  768. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  769. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  770. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  771. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  772. ############################################################
  773. # End __generate_query_script
  774. ############################################################
  775. ############################################################
  776. # __get_request_headers(accept_header)
  777. # Generates the complete HTTP header string
  778. ############################################################
  779. def __get_request_headers(self, accept_header):
  780. return self.headers_template.format(accept=accept_header)
  781. ############################################################
  782. # End __format_request_headers
  783. ############################################################
  784. ############################################################
  785. # __curl_url
  786. # Dump HTTP response and headers. Throw exception if there
  787. # is an HTTP error.
  788. ############################################################
  789. def __curl_url(self, url, testType, out, err):
  790. output = None
  791. try:
  792. # Use -m 15 to make curl stop trying after 15sec.
  793. # Use -i to output response with headers.
  794. # Don't use -f so that the HTTP response code is ignored.
  795. # Use --stderr - to redirect stderr to stdout so we get
  796. # error output for sure in stdout.
  797. # Use -sS to hide progress bar, but show errors.
  798. subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
  799. # HTTP output may not end in a newline, so add that here.
  800. out.write( "\n\n" )
  801. out.flush()
  802. err.flush()
  803. # We need to get the respond body from the curl and return it.
  804. p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
  805. output = p.communicate()
  806. except:
  807. pass
  808. if output:
  809. # We have the response body - return it
  810. return output[0]
  811. ##############################################################
  812. # End __curl_url
  813. ##############################################################
  814. def requires_database(self):
  815. """Returns True/False if this test requires a database"""
  816. return (self.contains_type(self.FORTUNE) or
  817. self.contains_type(self.DB) or
  818. self.contains_type(self.QUERY) or
  819. self.contains_type(self.UPDATE))
  820. ##########################################################################################
  821. # Constructor
  822. ##########################################################################################
  823. def __init__(self, name, directory, benchmarker, runTests, args):
  824. self.name = name
  825. self.directory = directory
  826. self.benchmarker = benchmarker
  827. self.runTests = runTests
  828. self.fwroot = benchmarker.fwroot
  829. # setup logging
  830. logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
  831. self.install_root="%s/%s" % (self.fwroot, "installs")
  832. if benchmarker.install_strategy is 'pertest':
  833. self.install_root="%s/pertest/%s" % (self.install_root, name)
  834. self.__dict__.update(args)
  835. # ensure directory has __init__.py file so that we can use it as a Python package
  836. if not os.path.exists(os.path.join(directory, "__init__.py")):
  837. open(os.path.join(directory, "__init__.py"), 'w').close()
  838. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  839. ############################################################
  840. # End __init__
  841. ############################################################
  842. ############################################################
  843. # End FrameworkTest
  844. ############################################################
  845. ##########################################################################################
  846. # Static methods
  847. ##########################################################################################
  848. ##############################################################
  849. # parse_config(config, directory, benchmarker)
  850. # parses a config file and returns a list of FrameworkTest
  851. # objects based on that config file.
  852. ##############################################################
  853. def parse_config(config, directory, benchmarker):
  854. tests = []
  855. # The config object can specify multiple tests, we neep to loop
  856. # over them and parse them out
  857. for test in config['tests']:
  858. for key, value in test.iteritems():
  859. test_name = config['framework']
  860. runTests = dict()
  861. runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
  862. runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
  863. runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
  864. runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
  865. runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
  866. runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)
  867. # if the test uses the 'defualt' keywork, then we don't
  868. # append anything to it's name. All configs should only have 1 default
  869. if key != 'default':
  870. # we need to use the key in the test_name
  871. test_name = test_name + "-" + key
  872. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  873. return tests
  874. ##############################################################
  875. # End parse_config
  876. ##############################################################
  877. import tempfile
  878. class WrapLogger():
  879. def __init__(self, logger, level):
  880. self.logger = logger
  881. self.level = level
  882. self.file = tempfile.TemporaryFile()
  883. def write(self, str):
  884. if self.level == "out":
  885. self.logger.info(str)
  886. elif self.level == "err":
  887. self.logger.error(str)
  888. else:
  889. self.logger.error("Unknown level %s" % self.level)
  890. self.logger.error(str)
  891. def __getattr__(self, name):
  892. return getattr(self.file, name)