framework_test.py 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. from setup.linux import setup_util
  3. import importlib
  4. import os
  5. import subprocess
  6. import time
  7. import re
  8. import pprint
  9. import sys
  10. import traceback
  11. import json
  12. import textwrap
  13. import logging
  14. import csv
  15. import shlex
class FrameworkTest:
    ##########################################################################################
    # Class variables
    ##########################################################################################

    # wrk header fragments; {accept} is substituted with one of the accept_* values below.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Fuller header set approximating a real browser request (user agent, cookies, etc.).
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    # Accept headers for each payload type under test.
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Shell script template for concurrency-sweep benchmarks: primer run, warmup run,
    # clock sync via ntpdate, then one wrk run per concurrency level in {interval}.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""

    # Shell script template for query-count sweeps: same primer/warmup/sync structure,
    # but the loop variable $c is appended to the URL (e.g. ?queries=$c) rather than
    # used as the wrk concurrency level.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""

    # Framework metadata — presumably populated from each framework's benchmark
    # configuration by code outside this view; TODO confirm against the loader.
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    os = None            # NOTE: class attribute shadows nothing inside methods; module-level `import os` still wins there
    database_os = None
    display_name = None
    notes = None
    versus = None

    ############################################################
    # Test Variables
    ############################################################
    # Canonical names for the six test types; used as keys into runTests
    # and as labels for output/warning files.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"
  123. ##########################################################################################
  124. # Public Methods
  125. ##########################################################################################
  126. ############################################################
  127. # Validates the jsonString is a JSON object with a 'message'
  128. # key with the value "hello, world!" (case-insensitive).
  129. ############################################################
  130. def validateJson(self, jsonString, out, err):
  131. try:
  132. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  133. if obj["message"].lower() == "hello, world!":
  134. return True
  135. except:
  136. pass
  137. return False
  138. ############################################################
  139. # Validates the jsonString is a JSON object that has an "id"
  140. # and a "randomNumber" key, and that both keys map to
  141. # integers.
  142. ############################################################
  143. def validateDb(self, jsonString, out, err):
  144. try:
  145. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  146. # We are allowing the single-object array for the DB
  147. # test for now, but will likely remove this later.
  148. if type(obj) == list:
  149. obj = obj[0]
  150. # This will error out of the value could not parsed to a
  151. # float (this will work with ints, but it will turn them
  152. # into their float equivalent; i.e. "123" => 123.0)
  153. if (type(float(obj["id"])) == float and
  154. type(float(obj["randomnumber"])) == float):
  155. return True
  156. except:
  157. pass
  158. return False
  159. def validateDbStrict(self, jsonString, out, err):
  160. try:
  161. obj = {k.lower(): v for k,v in json.loads(jsonString).items()}
  162. # This will error out of the value could not parsed to a
  163. # float (this will work with ints, but it will turn them
  164. # into their float equivalent; i.e. "123" => 123.0)
  165. if (type(float(obj["id"])) == float and
  166. type(float(obj["randomnumber"])) == float):
  167. return True
  168. except:
  169. pass
  170. return False
  171. ############################################################
  172. # Validates the jsonString is an array with a length of
  173. # 2, that each entry in the array is a JSON object, that
  174. # each object has an "id" and a "randomNumber" key, and that
  175. # both keys map to integers.
  176. ############################################################
  177. def validateQuery(self, jsonString, out, err):
  178. try:
  179. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  180. if (type(float(arr[0]["id"])) == float and
  181. type(float(arr[0]["randomnumber"])) == float and
  182. type(float(arr[1]["id"])) == float and
  183. type(float(arr[1]["randomnumber"])) == float):
  184. return True
  185. except:
  186. pass
  187. return False
  188. ############################################################
  189. # Validates the jsonString is an array with a length of
  190. # 1, that each entry in the array is a JSON object, that
  191. # each object has an "id" and a "randomNumber" key, and that
  192. # both keys map to integers.
  193. ############################################################
  194. def validateQueryOneOrLess(self, jsonString, out, err):
  195. try:
  196. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  197. if len(arr) != 1:
  198. return False
  199. for obj in arr:
  200. if (type(float(obj["id"])) != float or
  201. type(float(obj["randomnumber"])) != float or
  202. type(float(obj["id"])) != float or
  203. type(float(obj["randomnumber"])) != float):
  204. return False
  205. # By here, it's passed validation
  206. return True
  207. except:
  208. pass
  209. return False
  210. ############################################################
  211. # Validates the jsonString is an array with a length of
  212. # 500, that each entry in the array is a JSON object, that
  213. # each object has an "id" and a "randomNumber" key, and that
  214. # both keys map to integers.
  215. ############################################################
  216. def validateQueryFiveHundredOrMore(self, jsonString, out, err):
  217. try:
  218. arr = {k.lower(): v for k,v in json.loads(jsonString).items()}
  219. if len(arr) != 500:
  220. return False
  221. for obj in arr:
  222. if (type(float(obj["id"])) != float or
  223. type(float(obj["randomnumber"])) != float or
  224. type(float(obj["id"])) != float or
  225. type(float(obj["randomnumber"])) != float):
  226. return False
  227. # By here, it's passed validation
  228. return True
  229. except:
  230. pass
  231. return False
  232. ############################################################
  233. # Parses the given HTML string and asks a FortuneHTMLParser
  234. # whether the parsed string is a valid fortune return.
  235. ############################################################
  236. def validateFortune(self, htmlString, out, err):
  237. try:
  238. parser = FortuneHTMLParser()
  239. parser.feed(htmlString)
  240. return parser.isValidFortune()
  241. except:
  242. pass
  243. return False
  244. ############################################################
  245. # Validates the jsonString is an array with a length of
  246. # 2, that each entry in the array is a JSON object, that
  247. # each object has an "id" and a "randomNumber" key, and that
  248. # both keys map to integers.
  249. ############################################################
  250. def validateUpdate(self, jsonString, out, err):
  251. try:
  252. arr = [{k.lower(): v for k,v in d.items()} for d in json.loads(jsonString)]
  253. if (type(float(arr[0]["id"])) == float and
  254. type(float(arr[0]["randomnumber"])) == float and
  255. type(float(arr[1]["id"])) == float and
  256. type(float(arr[1]["randomnumber"])) == float):
  257. return True
  258. except:
  259. pass
  260. return False
  261. ############################################################
  262. #
  263. ############################################################
  264. def validatePlaintext(self, jsonString, out, err):
  265. try:
  266. return jsonString.lower().strip() == "hello, world!"
  267. except:
  268. pass
  269. return False
    ############################################################
    # start(benchmarker)
    # Start the test using it's setup file
    ############################################################
    def start(self, out, err):
        """Start this framework's server via its setup module.

        Sources the framework's bash_profile.sh (falling back to the
        shared benchmark_profile) with IROOT exported, then delegates
        to the setup module's start(). Returns whatever start() returns
        — presumably a status code; TODO confirm against setup modules.
        """
        # Load profile for this installation; fall back to the shared
        # profile when the framework ships none of its own.
        profile="%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Framework %s does not have a bash_profile" % self.name)
            profile="$FWROOT/config/benchmark_profile"
        # Export IROOT alongside the profile so the setup script sees the
        # install root. replace_environ is a project helper; presumably it
        # sources `config` and applies `command` to this process's env —
        # verify against setup.linux.setup_util.
        set_iroot="export IROOT=%s" % self.install_root
        setup_util.replace_environ(config=profile, command=set_iroot)
        return self.setup_module.start(self.benchmarker, out, err)
    ############################################################
    # End start
    ############################################################
    ############################################################
    # stop(benchmarker)
    # Stops the test using it's setup file
    ############################################################
    def stop(self, out, err):
        """Stop this framework's server by delegating to the setup
        module's stop(); returns its result unchanged."""
        return self.setup_module.stop(out, err)
    ############################################################
    # End stop
    ############################################################
  295. ############################################################
  296. # verify_urls
  297. # Verifys each of the URLs for this test. THis will sinply
  298. # curl the URL and check for it's return status.
  299. # For each url, a flag will be set on this object for whether
  300. # or not it passed
  301. # Returns True if all verifications succeeded
  302. ############################################################
  303. def verify_urls(self, out, err):
  304. result = True
  305. # JSON
  306. if self.runTests[self.JSON]:
  307. out.write(textwrap.dedent("""
  308. -----------------------------------------------------
  309. VERIFYING JSON ({url})
  310. -----------------------------------------------------
  311. """.format(url = self.json_url)))
  312. out.flush()
  313. url = self.benchmarker.generate_url(self.json_url, self.port)
  314. output = self.__curl_url(url, self.JSON, out, err)
  315. out.write("VALIDATING JSON ... ")
  316. if self.validateJson(output, out, err):
  317. self.json_url_passed = True
  318. out.write("PASS\n\n")
  319. else:
  320. self.json_url_passed = False
  321. out.write("FAIL\n\n")
  322. result = False
  323. out.flush
  324. # DB
  325. if self.runTests[self.DB]:
  326. out.write(textwrap.dedent("""
  327. -----------------------------------------------------
  328. VERIFYING DB ({url})
  329. -----------------------------------------------------
  330. """.format(url = self.db_url)))
  331. out.flush()
  332. url = self.benchmarker.generate_url(self.db_url, self.port)
  333. output = self.__curl_url(url, self.DB, out, err)
  334. if self.validateDb(output, out, err):
  335. self.db_url_passed = True
  336. else:
  337. self.db_url_passed = False
  338. if self.validateDbStrict(output, out, err):
  339. self.db_url_warn = False
  340. else:
  341. self.db_url_warn = True
  342. out.write("VALIDATING DB ... ")
  343. if self.db_url_passed:
  344. out.write("PASS")
  345. if self.db_url_warn:
  346. out.write(" (with warnings)")
  347. out.write("\n\n")
  348. else:
  349. out.write("FAIL\n\n")
  350. result = False
  351. out.flush
  352. # Query
  353. if self.runTests[self.QUERY]:
  354. out.write(textwrap.dedent("""
  355. -----------------------------------------------------
  356. VERIFYING QUERY ({url})
  357. -----------------------------------------------------
  358. """.format(url=self.query_url+"2")))
  359. out.flush()
  360. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  361. output = self.__curl_url(url, self.QUERY, out, err)
  362. if self.validateQuery(output, out, err):
  363. self.query_url_passed = True
  364. out.write(self.query_url + "2 - PASS\n\n")
  365. else:
  366. self.query_url_passed = False
  367. out.write(self.query_url + "2 - FAIL\n\n")
  368. out.write("-----------------------------------------------------\n\n")
  369. out.flush()
  370. self.query_url_warn = False
  371. url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
  372. output2 = self.__curl_url(url2, self.QUERY, out, err)
  373. if not self.validateQueryOneOrLess(output2, out, err):
  374. self.query_url_warn = True
  375. out.write(self.query_url + "0 - WARNING\n\n")
  376. else:
  377. out.write(self.query_url + "0 - PASS\n\n")
  378. out.write("-----------------------------------------------------\n\n")
  379. out.flush()
  380. url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
  381. output3 = self.__curl_url(url3, self.QUERY, out, err)
  382. if not self.validateQueryOneOrLess(output3, out, err):
  383. self.query_url_warn = True
  384. out.write(self.query_url + "foo - WARNING\n\n")
  385. else:
  386. out.write(self.query_url + "foo - PASS\n\n")
  387. out.write("-----------------------------------------------------\n\n")
  388. out.flush()
  389. url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
  390. output4 = self.__curl_url(url4, self.QUERY, out, err)
  391. if not self.validateQueryFiveHundredOrMore(output4, out, err):
  392. self.query_url_warn = True
  393. out.write(self.query_url + "501 - WARNING\n\n")
  394. else:
  395. out.write(self.query_url + "501 - PASS\n\n")
  396. out.write("-----------------------------------------------------\n\n\n")
  397. out.flush()
  398. out.write("VALIDATING QUERY ... ")
  399. if self.query_url_passed:
  400. out.write("PASS")
  401. if self.query_url_warn:
  402. out.write(" (with warnings)")
  403. out.write("\n\n")
  404. else:
  405. out.write("FAIL\n\n")
  406. result = False
  407. out.flush
  408. # Fortune
  409. if self.runTests[self.FORTUNE]:
  410. out.write(textwrap.dedent("""
  411. -----------------------------------------------------
  412. VERIFYING FORTUNE ({url})
  413. -----------------------------------------------------
  414. """.format(url = self.fortune_url)))
  415. out.flush()
  416. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  417. output = self.__curl_url(url, self.FORTUNE, out, err)
  418. out.write("VALIDATING FORTUNE ... ")
  419. if self.validateFortune(output, out, err):
  420. self.fortune_url_passed = True
  421. out.write("PASS\n\n")
  422. else:
  423. self.fortune_url_passed = False
  424. out.write("FAIL\n\n")
  425. result = False
  426. out.flush
  427. # Update
  428. if self.runTests[self.UPDATE]:
  429. out.write(textwrap.dedent("""
  430. -----------------------------------------------------
  431. VERIFYING UPDATE ({url})
  432. -----------------------------------------------------
  433. """.format(url = self.update_url)))
  434. out.flush()
  435. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  436. output = self.__curl_url(url, self.UPDATE, out, err)
  437. out.write("VALIDATING UPDATE ... ")
  438. if self.validateUpdate(output, out, err):
  439. self.update_url_passed = True
  440. out.write("PASS\n\n")
  441. else:
  442. self.update_url_passed = False
  443. out.write("FAIL\n\n")
  444. result = False
  445. out.flush
  446. # plaintext
  447. if self.runTests[self.PLAINTEXT]:
  448. out.write(textwrap.dedent("""
  449. -----------------------------------------------------
  450. VERIFYING PLAINTEXT ({url})
  451. -----------------------------------------------------
  452. """.format(url = self.plaintext_url)))
  453. out.flush()
  454. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  455. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  456. out.write("VALIDATING PLAINTEXT ... ")
  457. if self.validatePlaintext(output, out, err):
  458. self.plaintext_url_passed = True
  459. out.write("PASS\n\n")
  460. else:
  461. self.plaintext_url_passed = False
  462. out.write("FAIL\n\n")
  463. result = False
  464. out.flush
  465. return result
  466. ############################################################
  467. # End verify_urls
  468. ############################################################
  469. ############################################################
  470. # contains_type(type)
  471. # true if this test contains an implementation of the given
  472. # test type (json, db, etc.)
  473. ############################################################
  474. def contains_type(self, type):
  475. try:
  476. if type == self.JSON and self.json_url is not None:
  477. return True
  478. if type == self.DB and self.db_url is not None:
  479. return True
  480. if type == self.QUERY and self.query_url is not None:
  481. return True
  482. if type == self.FORTUNE and self.fortune_url is not None:
  483. return True
  484. if type == self.UPDATE and self.update_url is not None:
  485. return True
  486. if type == self.PLAINTEXT and self.plaintext_url is not None:
  487. return True
  488. except AttributeError:
  489. pass
  490. return False
  491. ############################################################
  492. # End stop
  493. ############################################################
  494. ############################################################
  495. # benchmark
  496. # Runs the benchmark for each type of test that it implements
  497. # JSON/DB/Query.
  498. ############################################################
  499. def benchmark(self, out, err):
  500. # JSON
  501. if self.runTests[self.JSON]:
  502. try:
  503. out.write("BENCHMARKING JSON ... ")
  504. out.flush()
  505. results = None
  506. output_file = self.benchmarker.output_file(self.name, self.JSON)
  507. if not os.path.exists(output_file):
  508. with open(output_file, 'w'):
  509. # Simply opening the file in write mode should create the empty file.
  510. pass
  511. if self.json_url_passed:
  512. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  513. self.__begin_logging(self.JSON)
  514. self.__run_benchmark(remote_script, output_file, err)
  515. self.__end_logging()
  516. results = self.__parse_test(self.JSON)
  517. print results
  518. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  519. out.write( "Complete\n" )
  520. out.flush()
  521. except AttributeError:
  522. pass
  523. # DB
  524. if self.runTests[self.DB]:
  525. try:
  526. out.write("BENCHMARKING DB ... ")
  527. out.flush()
  528. results = None
  529. output_file = self.benchmarker.output_file(self.name, self.DB)
  530. warning_file = self.benchmarker.warning_file(self.name, self.DB)
  531. if not os.path.exists(output_file):
  532. with open(output_file, 'w'):
  533. # Simply opening the file in write mode should create the empty file.
  534. pass
  535. if self.db_url_warn:
  536. with open(warning_file, 'w'):
  537. pass
  538. if self.db_url_passed:
  539. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  540. self.__begin_logging(self.DB)
  541. self.__run_benchmark(remote_script, output_file, err)
  542. self.__end_logging()
  543. results = self.__parse_test(self.DB)
  544. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  545. out.write( "Complete\n" )
  546. except AttributeError:
  547. pass
  548. # Query
  549. if self.runTests[self.QUERY]:
  550. try:
  551. out.write("BENCHMARKING Query ... ")
  552. out.flush()
  553. results = None
  554. output_file = self.benchmarker.output_file(self.name, self.QUERY)
  555. warning_file = self.benchmarker.warning_file(self.name, self.QUERY)
  556. if not os.path.exists(output_file):
  557. with open(output_file, 'w'):
  558. # Simply opening the file in write mode should create the empty file.
  559. pass
  560. if self.query_url_warn:
  561. with open(warning_file, 'w'):
  562. pass
  563. if self.query_url_passed:
  564. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  565. self.__begin_logging(self.QUERY)
  566. self.__run_benchmark(remote_script, output_file, err)
  567. self.__end_logging()
  568. results = self.__parse_test(self.QUERY)
  569. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  570. out.write( "Complete\n" )
  571. out.flush()
  572. except AttributeError:
  573. pass
  574. # fortune
  575. if self.runTests[self.FORTUNE]:
  576. try:
  577. out.write("BENCHMARKING Fortune ... ")
  578. out.flush()
  579. results = None
  580. output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
  581. if not os.path.exists(output_file):
  582. with open(output_file, 'w'):
  583. # Simply opening the file in write mode should create the empty file.
  584. pass
  585. if self.fortune_url_passed:
  586. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  587. self.__begin_logging(self.FORTUNE)
  588. self.__run_benchmark(remote_script, output_file, err)
  589. self.__end_logging()
  590. results = self.__parse_test(self.FORTUNE)
  591. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  592. out.write( "Complete\n" )
  593. out.flush()
  594. except AttributeError:
  595. pass
  596. # update
  597. if self.runTests[self.UPDATE]:
  598. try:
  599. out.write("BENCHMARKING Update ... ")
  600. out.flush()
  601. results = None
  602. output_file = self.benchmarker.output_file(self.name, self.UPDATE)
  603. if not os.path.exists(output_file):
  604. with open(output_file, 'w'):
  605. # Simply opening the file in write mode should create the empty file.
  606. pass
  607. if self.update_url_passed:
  608. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  609. self.__begin_logging(self.UPDATE)
  610. self.__run_benchmark(remote_script, output_file, err)
  611. self.__end_logging()
  612. results = self.__parse_test(self.UPDATE)
  613. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  614. out.write( "Complete\n" )
  615. out.flush()
  616. except AttributeError:
  617. pass
  618. # plaintext
  619. if self.runTests[self.PLAINTEXT]:
  620. try:
  621. out.write("BENCHMARKING Plaintext ... ")
  622. out.flush()
  623. results = None
  624. output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
  625. if not os.path.exists(output_file):
  626. with open(output_file, 'w'):
  627. # Simply opening the file in write mode should create the empty file.
  628. pass
  629. if self.plaintext_url_passed:
  630. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
  631. self.__begin_logging(self.PLAINTEXT)
  632. self.__run_benchmark(remote_script, output_file, err)
  633. self.__end_logging()
  634. results = self.__parse_test(self.PLAINTEXT)
  635. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  636. out.write( "Complete\n" )
  637. out.flush()
  638. except AttributeError:
  639. traceback.print_exc()
  640. pass
  641. ############################################################
  642. # End benchmark
  643. ############################################################
  644. ############################################################
  645. # parse_all
  646. # Method meant to be run for a given timestamp
  647. ############################################################
  648. def parse_all(self):
  649. # JSON
  650. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  651. results = self.__parse_test(self.JSON)
  652. self.benchmarker.report_results(framework=self, test=self.JSON, results=results['results'])
  653. # DB
  654. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  655. results = self.__parse_test(self.DB)
  656. self.benchmarker.report_results(framework=self, test=self.DB, results=results['results'])
  657. # Query
  658. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  659. results = self.__parse_test(self.QUERY)
  660. self.benchmarker.report_results(framework=self, test=self.QUERY, results=results['results'])
  661. # Fortune
  662. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  663. results = self.__parse_test(self.FORTUNE)
  664. self.benchmarker.report_results(framework=self, test=self.FORTUNE, results=results['results'])
  665. # Update
  666. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  667. results = self.__parse_test(self.UPDATE)
  668. self.benchmarker.report_results(framework=self, test=self.UPDATE, results=results['results'])
  669. # Plaintext
  670. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  671. results = self.__parse_test(self.PLAINTEXT)
  672. self.benchmarker.report_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  673. ############################################################
  674. # End parse_all
  675. ############################################################
  676. ############################################################
  677. # __parse_test(test_type)
  678. ############################################################
def __parse_test(self, test_type):
    # Parse the raw wrk output file for the given test type into a results
    # dictionary, and dump interval-synced dstat statistics to a companion
    # ".json" file.
    #
    # Returns a dict with key 'results' (a list of per-run dicts keyed by
    # latency/requests/error/timing fields), or None on IOError.
    try:
        results = dict()
        results['results'] = []
        # NOTE(review): existence is checked via get_output_file() but the
        # file is opened via output_file() -- presumably both return the same
        # path; confirm against the benchmarker implementation.
        if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
            with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
                is_warmup = True
                rawData = None
                for line in raw_data:
                    # A "Queries:"/"Concurrency:" marker begins a new measured
                    # run; reset rawData so a fresh result dict is created on
                    # the first data line below.
                    if "Queries:" in line or "Concurrency:" in line:
                        is_warmup = False
                        rawData = None
                        continue
                    # Warmup/primer output is ignored entirely.
                    if "Warmup" in line or "Primer" in line:
                        is_warmup = True
                        continue
                    if not is_warmup:
                        if rawData == None:
                            rawData = dict()
                            results['results'].append(rawData)
                        #if "Requests/sec:" in line:
                        #    m = re.search("Requests/sec:\s+([0-9]+)", line)
                        #    rawData['reportedResults'] = m.group(1)
                        # search for weighttp data such as succeeded and failed.
                        if "Latency" in line:
                            # wrk prints avg/stdev/max/+-stdev%; only keep the
                            # first three.
                            m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                            if len(m) == 4:
                                rawData['latencyAvg'] = m[0]
                                rawData['latencyStdev'] = m[1]
                                rawData['latencyMax'] = m[2]
                        #        rawData['latencyStdevPercent'] = m[3]
                        #if "Req/Sec" in line:
                        #    m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
                        #    if len(m) == 4:
                        #        rawData['requestsAvg'] = m[0]
                        #        rawData['requestsStdev'] = m[1]
                        #        rawData['requestsMax'] = m[2]
                        #        rawData['requestsStdevPercent'] = m[3]
                        #if "requests in" in line:
                        #    m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
                        #    if m != None:
                        #        # parse out the raw time, which may be in minutes or seconds
                        #        raw_time = m.group(1)
                        #        if "ms" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
                        #        elif "s" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1])
                        #        elif "m" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
                        #        elif "h" in raw_time:
                        #            rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
                        if "requests in" in line:
                            m = re.search("([0-9]+) requests in", line)
                            if m != None:
                                rawData['totalRequests'] = int(m.group(1))
                        # Socket error counts (connect/read/write/timeout) all
                        # appear on the same "Socket errors" line.
                        if "Socket errors" in line:
                            if "connect" in line:
                                m = re.search("connect ([0-9]+)", line)
                                rawData['connect'] = int(m.group(1))
                            if "read" in line:
                                m = re.search("read ([0-9]+)", line)
                                rawData['read'] = int(m.group(1))
                            if "write" in line:
                                m = re.search("write ([0-9]+)", line)
                                rawData['write'] = int(m.group(1))
                            if "timeout" in line:
                                m = re.search("timeout ([0-9]+)", line)
                                rawData['timeout'] = int(m.group(1))
                        if "Non-2xx" in line:
                            m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                            if m != None:
                                rawData['5xx'] = int(m.group(1))
                        # STARTTIME/ENDTIME are epoch markers emitted by the
                        # benchmark script around each run.
                        if "STARTTIME" in line:
                            m = re.search("[0-9]+", line)
                            rawData["startTime"] = int(m.group(0))
                        if "ENDTIME" in line:
                            m = re.search("[0-9]+", line)
                            rawData["endTime"] = int(m.group(0))
                            # Once a run's end time is known, slice the dstat
                            # stats for that window and write them next to the
                            # raw stats file.
                            stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
                            with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
                                json.dump(stats, stats_file)
        return results
    except IOError:
        # Raw output file unreadable: signal "no results" to the caller.
        return None
############################################################
# End __parse_test
############################################################
  766. ##########################################################################################
  767. # Private Methods
  768. ##########################################################################################
  769. ############################################################
# __run_benchmark(script, output_file, err)
# Runs a single benchmark using the script, which is a bash
# template that drives the load generator (wrk) on the client.
# All the results are written to the output_file.
  774. ############################################################
  775. def __run_benchmark(self, script, output_file, err):
  776. with open(output_file, 'w') as raw_file:
  777. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  778. p.communicate(script)
  779. err.flush()
  780. ############################################################
  781. # End __run_benchmark
  782. ############################################################
  783. ############################################################
  784. # __generate_concurrency_script(url, port)
  785. # Generates the string containing the bash script that will
  786. # be run on the client to benchmark a single test. This
  787. # specifically works for the variable concurrency tests (JSON
  788. # and DB)
  789. ############################################################
  790. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  791. if len(intervals) == 0:
  792. intervals = self.benchmarker.concurrency_levels
  793. headers = self.__get_request_headers(accept_header)
  794. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  795. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  796. interval=" ".join("{}".format(item) for item in intervals),
  797. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  798. pipeline=pipeline)
  799. ############################################################
  800. # End __generate_concurrency_script
  801. ############################################################
  802. ############################################################
  803. # __generate_query_script(url, port)
  804. # Generates the string containing the bash script that will
  805. # be run on the client to benchmark a single test. This
  806. # specifically works for the variable query tests (Query)
  807. ############################################################
  808. def __generate_query_script(self, url, port, accept_header):
  809. headers = self.__get_request_headers(accept_header)
  810. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  811. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  812. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  813. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  814. ############################################################
  815. # End __generate_query_script
  816. ############################################################
  817. ############################################################
  818. # __get_request_headers(accept_header)
  819. # Generates the complete HTTP header string
  820. ############################################################
  821. def __get_request_headers(self, accept_header):
  822. return self.headers_template.format(accept=accept_header)
  823. ############################################################
# End __get_request_headers
  825. ############################################################
  826. ############################################################
  827. # __curl_url
  828. # Dump HTTP response and headers. Throw exception if there
  829. # is an HTTP error.
  830. ############################################################
  831. def __curl_url(self, url, testType, out, err):
  832. output = None
  833. try:
  834. # Use -m 15 to make curl stop trying after 15sec.
  835. # Use -i to output response with headers.
  836. # Don't use -f so that the HTTP response code is ignored.
  837. # Use --stderr - to redirect stderr to stdout so we get
  838. # error output for sure in stdout.
  839. # Use -sS to hide progress bar, but show errors.
  840. subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
  841. # HTTP output may not end in a newline, so add that here.
  842. out.write( "\n\n" )
  843. out.flush()
  844. err.flush()
  845. # We need to get the respond body from the curl and return it.
  846. p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
  847. output = p.communicate()
  848. except:
  849. pass
  850. if output:
  851. # We have the response body - return it
  852. return output[0]
  853. ##############################################################
  854. # End __curl_url
  855. ##############################################################
  856. def requires_database(self):
  857. """Returns True/False if this test requires a database"""
  858. return (self.contains_type(self.FORTUNE) or
  859. self.contains_type(self.DB) or
  860. self.contains_type(self.QUERY) or
  861. self.contains_type(self.UPDATE))
  862. ############################################################
  863. # __begin_logging
  864. # Starts a thread to monitor the resource usage, to be synced with the client's time
  865. # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  866. ############################################################
  867. def __begin_logging(self, test_name):
  868. output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_name))
  869. dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
  870. --raw --socket --tcp --udp --unix --vm --disk-util \
  871. --rpc --rpcd --output {output_file}".format(output_file=output_file)
  872. cmd = shlex.split(dstat_string)
  873. dev_null = open(os.devnull, "w")
  874. self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
  875. ##############################################################
  876. # End __begin_logging
  877. ##############################################################
  878. ##############################################################
  879. # Begin __end_logging
  880. # Stops the logger thread and blocks until shutdown is complete.
  881. ##############################################################
  882. def __end_logging(self):
  883. self.subprocess_handle.terminate()
  884. self.subprocess_handle.communicate()
  885. ##############################################################
  886. # End __end_logging
  887. ##############################################################
  888. ##############################################################
  889. # Begin __parse_stats
  890. # For each test type, process all the statistics, and return a multi-layered dictionary
  891. # that has a structure as follows:
  892. # (timestamp)
  893. # | (main header) - group that the stat is in
  894. # | | (sub header) - title of the stat
  895. # | | | (stat) - the stat itself, usually a floating point number
  896. ##############################################################
def __parse_stats(self, test_type, start_time, end_time, interval):
    # Read the dstat CSV output for this test and build a nested dict:
    #   {epoch timestamp: {main header: {sub header: stat value}}}
    # Only rows whose epoch lies within [start_time, end_time] are kept,
    # sampled every `interval`-th row.
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
        # dstat writes a free-form preamble terminated by a blank line
        # before the CSV data; skip past it. (Python 2 file iterator API.)
        while(stats.next() != "\n"):
            pass
        stats_reader = csv.reader(stats)
        # h1: group headers (blank for continuation columns of the same
        # group); h2: per-column stat titles, including the "epoch" column.
        h1= stats_reader.next()
        h2 = stats_reader.next()
        time_row = h2.index("epoch")
        int_counter = 0
        for row in stats_reader:
            time = float(row[time_row])
            int_counter+=1
            # Rows before the benchmark window are skipped; the first row
            # past the window ends parsing early.
            if time < start_time:
                continue
            elif time > end_time:
                return stats_dict
            if int_counter % interval != 0:
                continue
            # Pre-create one sub-dict per named column group.
            row_dict = dict()
            for nextheader in h1:
                if nextheader != "":
                    row_dict[nextheader] = dict()
            # Columns inherit the most recent non-empty group header.
            header = ""
            for item_num in range(len(row)):
                if(len(h1[item_num]) != 0):
                    header = h1[item_num]
                row_dict[header][h2[item_num]] = row[item_num]
            stats_dict[time] = row_dict
    return stats_dict
  928. ##############################################################
  929. # End __parse_stats
  930. ##############################################################
  931. ##########################################################################################
  932. # Constructor
  933. ##########################################################################################
  934. def __init__(self, name, directory, benchmarker, runTests, args):
  935. self.name = name
  936. self.directory = directory
  937. self.benchmarker = benchmarker
  938. self.runTests = runTests
  939. self.fwroot = benchmarker.fwroot
  940. # setup logging
  941. logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
  942. self.install_root="%s/%s" % (self.fwroot, "installs")
  943. if benchmarker.install_strategy is 'pertest':
  944. self.install_root="%s/pertest/%s" % (self.install_root, name)
  945. self.__dict__.update(args)
  946. # ensure directory has __init__.py file so that we can use it as a Python package
  947. if not os.path.exists(os.path.join(directory, "__init__.py")):
  948. open(os.path.join(directory, "__init__.py"), 'w').close()
  949. self.setup_module = setup_module = importlib.import_module(directory + '.' + self.setup_file)
  950. ############################################################
  951. # End __init__
  952. ############################################################
  953. ############################################################
  954. # End FrameworkTest
  955. ############################################################
  956. ##########################################################################################
  957. # Static methods
  958. ##########################################################################################
  959. ##############################################################
  960. # parse_config(config, directory, benchmarker)
  961. # parses a config file and returns a list of FrameworkTest
  962. # objects based on that config file.
  963. ##############################################################
  964. def parse_config(config, directory, benchmarker):
  965. tests = []
  966. # The config object can specify multiple tests, we neep to loop
  967. # over them and parse them out
  968. for test in config['tests']:
  969. for key, value in test.iteritems():
  970. test_name = config['framework']
  971. runTests = dict()
  972. runTests["json"] = (benchmarker.type == "all" or benchmarker.type == "json") and value.get("json_url", False)
  973. runTests["db"] = (benchmarker.type == "all" or benchmarker.type == "db") and value.get("db_url", False)
  974. runTests["query"] = (benchmarker.type == "all" or benchmarker.type == "query") and value.get("query_url", False)
  975. runTests["fortune"] = (benchmarker.type == "all" or benchmarker.type == "fortune") and value.get("fortune_url", False)
  976. runTests["update"] = (benchmarker.type == "all" or benchmarker.type == "update") and value.get("update_url", False)
  977. runTests["plaintext"] = (benchmarker.type == "all" or benchmarker.type == "plaintext") and value.get("plaintext_url", False)
  978. # if the test uses the 'defualt' keywork, then we don't
  979. # append anything to it's name. All configs should only have 1 default
  980. if key != 'default':
  981. # we need to use the key in the test_name
  982. test_name = test_name + "-" + key
  983. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, value))
  984. return tests
  985. ##############################################################
  986. # End parse_config
  987. ##############################################################