framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *

import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math

from threading import Thread
from threading import Event

from utils import header

class FrameworkTest:
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"

  # Used for test types that do not require a database -
  # these tests are run at multiple concurrency levels
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
  """
  # Used for test types that require a database -
  # these tests run at a static concurrency level and vary the size of
  # the query sent with each request
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {interval}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
  """
  ############################################################
  # Test Variables
  ############################################################
  JSON = "json"
  DB = "db"
  QUERY = "query"
  FORTUNE = "fortune"
  UPDATE = "update"
  PLAINTEXT = "plaintext"

  ##########################################################################################
  # Public Methods
  ##########################################################################################
  ############################################################
  # Validates that jsonString is a JSON object with an "id"
  # and a "randomNumber" key, and that both keys map to
  # numbers.
  ############################################################
  def validateDb(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response"
      return (False, err_str)
    try:
      response = json.loads(jsonString)
      # We are allowing the single-object array for the DB
      # test for now, but will likely remove this later.
      if type(response) == list:
        response = response[0]
      obj = {k.lower(): v for k, v in response.iteritems()}

      if "id" not in obj or "randomnumber" not in obj:
        err_str += "Expected keys id and randomNumber to be in JSON string. "
        return (False, err_str)

      # float() will raise if the value cannot be parsed as a
      # number (it accepts ints, but turns them into their
      # float equivalent; i.e. "123" => 123.0)
      id_ret_val = True
      try:
        if not isinstance(float(obj["id"]), float):
          id_ret_val = False
      except:
        id_ret_val = False
      if not id_ret_val:
        err_str += "Expected id to be type int or float, got '{id}' ".format(id=obj["id"])

      random_num_ret_val = True
      try:
        if not isinstance(float(obj["randomnumber"]), float):
          random_num_ret_val = False
      except:
        random_num_ret_val = False
      if not random_num_ret_val:
        err_str += "Expected randomNumber to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
    except:
      err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
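  # Example (illustrative payloads): '{"id": 4174, "randomNumber": 331}'
  # passes, as does the single-object array '[{"id": 4174, "randomNumber": 331}]';
  # '{"id": 4174}' fails the key check, and a non-numeric value such as
  # '{"id": "abc", "randomNumber": 331}' fails the float() parse.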
  def validateDbStrict(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response "
      return (False, err_str)
    try:
      obj = {k.lower(): v for k, v in json.loads(jsonString).iteritems()}

      # float() will raise if the value cannot be parsed as a
      # number (it accepts ints, but turns them into their
      # float equivalent; i.e. "123" => 123.0)
      id_ret_val = True
      try:
        if not isinstance(float(obj["id"]), float):
          id_ret_val = False
      except:
        id_ret_val = False
      if not id_ret_val:
        err_str += "Expected id to be type int or float, got '{id}' ".format(id=obj["id"])

      random_num_ret_val = True
      try:
        if not isinstance(float(obj["randomnumber"]), float):
          random_num_ret_val = False
      except:
        random_num_ret_val = False
      if not random_num_ret_val:
        err_str += "Expected randomNumber to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
    except:
      err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
  ############################################################
  # Validates that jsonString is an array of length 2, that
  # each entry in the array is a JSON object, that each object
  # has an "id" and a "randomNumber" key, and that both keys
  # map to numbers.
  ############################################################
  def validateQuery(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response"
      return (False, err_str)
    try:
      arr = [{k.lower(): v for k, v in d.iteritems()} for d in json.loads(jsonString)]
      if len(arr) != 2:
        err_str += "Expected array of length 2. Got length {length}. ".format(length=len(arr))
      for obj in arr:
        id_ret_val = True
        random_num_ret_val = True
        if "id" not in obj or "randomnumber" not in obj:
          err_str += "Expected keys id and randomNumber to be in JSON string. "
          break
        try:
          if not isinstance(float(obj["id"]), float):
            id_ret_val = False
        except:
          id_ret_val = False
        if not id_ret_val:
          err_str += "Expected id to be type int or float, got '{id}' ".format(id=obj["id"])
        try:
          if not isinstance(float(obj["randomnumber"]), float):
            random_num_ret_val = False
        except:
          random_num_ret_val = False
        if not random_num_ret_val:
          err_str += "Expected randomNumber to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
    except:
      err_str += "Got exception when trying to validate the query test: {exception}".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
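  # Example (illustrative): '[{"id": 1, "randomNumber": 2}, {"id": 3, "randomNumber": 4}]'
  # passes; any other array length, or an entry missing either key, is
  # reported via err_str.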
  ############################################################
  # Validates that jsonString is an array of length 1, that
  # the entry in the array is a JSON object, that the object
  # has an "id" and a "randomNumber" key, and that both keys
  # map to numbers.
  ############################################################
  def validateQueryOneOrLess(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response"
    else:
      try:
        json_load = json.loads(jsonString)
        if not isinstance(json_load, list):
          err_str += "Expected JSON array, got {typeObj}. ".format(typeObj=type(json_load))
          return (False, err_str)
        if len(json_load) != 1:
          err_str += "Expected array of length 1. Got length {length}. ".format(length=len(json_load))
          return (False, err_str)

        obj = {k.lower(): v for k, v in json_load[0].iteritems()}
        id_ret_val = True
        random_num_ret_val = True
        if "id" not in obj or "randomnumber" not in obj:
          err_str += "Expected keys id and randomNumber to be in JSON string. "
        try:
          if not isinstance(float(obj["id"]), float):
            id_ret_val = False
        except:
          id_ret_val = False
        if not id_ret_val:
          err_str += "Expected id to be type int or float, got '{id}'. ".format(id=obj["id"])
        try:
          if not isinstance(float(obj["randomnumber"]), float):
            random_num_ret_val = False
        except:
          random_num_ret_val = False
        if not random_num_ret_val:
          err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
      except:
        err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
  ############################################################
  # Validates that jsonString is an array of length 500, that
  # each entry in the array is a JSON object, that each object
  # has an "id" and a "randomNumber" key, and that both keys
  # map to numbers.
  ############################################################
  def validateQueryFiveHundredOrMore(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response"
      return (False, err_str)
    try:
      arr = [{k.lower(): v for k, v in d.iteritems()} for d in json.loads(jsonString)]
      if len(arr) != 500:
        err_str += "Expected array of length 500. Got length {length}. ".format(length=len(arr))
        return (False, err_str)
      for obj in arr:
        id_ret_val = True
        random_num_ret_val = True
        if "id" not in obj or "randomnumber" not in obj:
          err_str += "Expected keys id and randomNumber to be in JSON string. "
          break
        try:
          if not isinstance(float(obj["id"]), float):
            id_ret_val = False
        except:
          id_ret_val = False
        if not id_ret_val:
          err_str += "Expected id to be type int or float, got '{id}'. ".format(id=obj["id"])
        try:
          if not isinstance(float(obj["randomnumber"]), float):
            random_num_ret_val = False
        except:
          random_num_ret_val = False
        if not random_num_ret_val:
          err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
    except:
      err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
  ############################################################
  # Parses the given HTML string and asks a FortuneHTMLParser
  # whether the parsed string is a valid fortune response.
  ############################################################
  def validateFortune(self, htmlString, out, err):
    err_str = ""
    if htmlString is None or len(htmlString) == 0:
      err_str += "Empty Response"
      return (False, err_str)
    try:
      parser = FortuneHTMLParser()
      parser.feed(htmlString)
      valid = parser.isValidFortune(out)
      return (valid, '' if valid else 'Did not pass validation')
    except:
      print "Got exception when trying to validate the fortune test: {exception} ".format(exception=traceback.format_exc())
      return (False, err_str)
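  # FortuneHTMLParser (imported at the top of this file) feeds the markup
  # through Python's HTMLParser; isValidFortune is expected to compare the
  # accumulated, normalized output against a known-good rendering of the
  # fortunes table - see benchmark/fortune_html_parser.py for the details.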
  ############################################################
  # Validates that jsonString is an array of length 2, that
  # each entry in the array is a JSON object, that each object
  # has an "id" and a "randomNumber" key, and that both keys
  # map to numbers.
  ############################################################
  def validateUpdate(self, jsonString, out, err):
    err_str = ""
    if jsonString is None or len(jsonString) == 0:
      err_str += "Empty Response"
      return (False, err_str)
    try:
      arr = [{k.lower(): v for k, v in d.iteritems()} for d in json.loads(jsonString)]
      if len(arr) != 2:
        err_str += "Expected array of length 2. Got length {length}.\n".format(length=len(arr))
      for obj in arr:
        id_ret_val = True
        random_num_ret_val = True
        if "id" not in obj or "randomnumber" not in obj:
          err_str += "Expected keys id and randomNumber to be in JSON string.\n"
          return (False, err_str)
        try:
          if not isinstance(float(obj["id"]), float):
            id_ret_val = False
        except:
          id_ret_val = False
        if not id_ret_val:
          err_str += "Expected id to be type int or float, got '{id}'.\n".format(id=obj["id"])
        try:
          if not isinstance(float(obj["randomnumber"]), float):
            random_num_ret_val = False
        except:
          random_num_ret_val = False
        if not random_num_ret_val:
          err_str += "Expected randomNumber to be type int or float, got '{rand}'.\n".format(rand=obj["randomnumber"])
    except:
      err_str += "Got exception when trying to validate the update test: {exception}\n".format(exception=traceback.format_exc())
    return (True, ) if len(err_str) == 0 else (False, err_str)
  ############################################################
  # start(benchmarker)
  # Starts the test using its setup file
  ############################################################
  def start(self, out, err):
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile = "$FWROOT/config/benchmark_profile"

    # Set up environment variables for TROOT and IROOT
    setup_util.replace_environ(config=profile,
      command='export TROOT=%s && export IROOT=%s' %
        (self.directory, self.install_root))

    # Because start can take so long, periodically write a note to stderr
    # to let the user know we are still working
    class ProgressPrinterThread(Thread):
      def __init__(self, event):
        Thread.__init__(self)
        self.stopped = event

      def run(self):
        while not self.stopped.wait(20):
          sys.stderr.write("Waiting for start to return...\n")

    stopFlag = Event()
    thread = ProgressPrinterThread(stopFlag)
    thread.start()

    # Run the module start (inside parent of TROOT)
    # - we use the parent as a historical accident - a lot of tests
    #   use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.troot))
    try:
      retcode = self.setup_module.start(self, out, err)
      if retcode is None:
        retcode = 0
    except Exception:
      retcode = 1
      st = traceback.format_exc()
      st = '\n'.join((4 * ' ') + x for x in st.splitlines())
      st = "Start exception:\n%s" % st
      logging.info(st)
      err.write(st + '\n')
    os.chdir(previousDir)

    # Stop the progress printer
    stopFlag.set()

    logging.info("Start completed, running %s", self.benchmarker.mode)
    return retcode
  ############################################################
  # End start
  ############################################################
  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile = "$FWROOT/config/benchmark_profile"
    setup_util.replace_environ(config=profile,
      command='export TROOT=%s && export IROOT=%s' %
        (self.directory, self.install_root))

    # Run the module stop (inside parent of TROOT)
    # - we use the parent as a historical accident - a lot of tests
    #   use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.troot))
    try:
      retcode = self.setup_module.stop(out, err)
      if retcode is None:
        retcode = 0
    except Exception:
      retcode = 1
      st = traceback.format_exc()
      st = '\n'.join((4 * ' ') + x for x in st.splitlines())
      st = "Stop exception:\n%s\n" % st
      logging.info(st)
      err.write(st + '\n')
    os.chdir(previousDir)

    # Give processes sent a SIGTERM a moment to shut down gracefully
    time.sleep(5)
    return retcode
  ############################################################
  # End stop
  ############################################################
  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check its return status. For each url, a
  # flag will be set on this object for whether or not it
  # passed.
  # Returns True if all verifications succeeded
  ############################################################
  def verify_urls(self, out, err):
    result = True

    def verify_type(test_type):
      test = self.runTests[test_type]
      out.write(header("VERIFYING %s" % test_type.upper()))
      base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)
      results = test.verify(base_url)
      test.failed = any(result == 'fail' for (result, reason, url) in results)
      test.warned = any(result == 'warn' for (result, reason, url) in results)
      test.passed = all(result == 'pass' for (result, reason, url) in results)

      def output_result(result, reason, url):
        out.write("   %s for %s\n" % (result.upper(), url))
        print "   %s for %s" % (result.upper(), url)
        if reason is not None and len(reason) != 0:
          for line in reason.splitlines():
            out.write("     " + line + '\n')
            print "     " + line

      for (r1, r2, url) in results:
        output_result(r1, r2, url)

      if test.failed:
        self.benchmarker.report_verify_results(self, test_type, 'fail')
      elif test.warned:
        self.benchmarker.report_verify_results(self, test_type, 'warn')
      elif test.passed:
        self.benchmarker.report_verify_results(self, test_type, 'pass')
      else:
        raise Exception("Unexpected verification state: neither passed, warned, nor failed")

    # JSON
    if self.runTests[self.JSON]:
      out.write(header("VERIFYING JSON (%s)" % self.json_url))
      out.flush()
      url = self.benchmarker.generate_url(self.json_url, self.port)
      output = self.__curl_url(url, self.JSON, out, err)
      out.write("VALIDATING JSON ... ")
      ret_tuple = self.validateJson(output, out, err)
      if ret_tuple[0]:
        self.json_url_passed = True
        out.write("PASS\n\n")
        self.benchmarker.report_verify_results(self, self.JSON, 'pass')
      else:
        self.json_url_passed = False
        out.write("\nFAIL " + ret_tuple[1] + "\n\n")
        self.benchmarker.report_verify_results(self, self.JSON, 'fail')
        result = False
      out.flush()

    # DB
    if self.runTests[self.DB]:
      out.write(header("VERIFYING DB (%s)" % self.db_url))
      out.flush()
      url = self.benchmarker.generate_url(self.db_url, self.port)
      output = self.__curl_url(url, self.DB, out, err)
      validate_ret_tuple = self.validateDb(output, out, err)
      validate_strict_ret_tuple = self.validateDbStrict(output, out, err)
      self.db_url_passed = validate_ret_tuple[0]
      self.db_url_warn = not validate_strict_ret_tuple[0]
      out.write("VALIDATING DB ... ")
      if self.db_url_passed:
        out.write("PASS")
        self.benchmarker.report_verify_results(self, self.DB, 'pass')
        if self.db_url_warn:
          out.write(" (with warnings) " + validate_strict_ret_tuple[1])
          self.benchmarker.report_verify_results(self, self.DB, 'warn')
        out.write("\n\n")
      else:
        self.benchmarker.report_verify_results(self, self.DB, 'fail')
        out.write("\nFAIL " + validate_ret_tuple[1])
        result = False
      out.flush()

    # Query
    if self.runTests[self.QUERY]:
      out.write(header("VERIFYING QUERY (%s)" % (self.query_url + "2")))
      out.flush()

      url = self.benchmarker.generate_url(self.query_url + "2", self.port)
      output = self.__curl_url(url, self.QUERY, out, err)
      ret_tuple = self.validateQuery(output, out, err)
      if ret_tuple[0]:
        self.query_url_passed = True
        out.write(self.query_url + "2 - PASS\n\n")
      else:
        self.query_url_passed = False
        out.write(self.query_url + "2 - FAIL " + ret_tuple[1] + "\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      self.query_url_warn = False
      url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
      output2 = self.__curl_url(url2, self.QUERY, out, err)
      ret_tuple = self.validateQueryOneOrLess(output2, out, err)
      if not ret_tuple[0]:
        self.query_url_warn = True
        out.write(self.query_url + "0 - WARNING " + ret_tuple[1] + "\n\n")
      else:
        out.write(self.query_url + "0 - PASS\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
      output3 = self.__curl_url(url3, self.QUERY, out, err)
      ret_tuple = self.validateQueryOneOrLess(output3, out, err)
      if not ret_tuple[0]:
        self.query_url_warn = True
        out.write(self.query_url + "foo - WARNING " + ret_tuple[1] + "\n\n")
      else:
        out.write(self.query_url + "foo - PASS\n\n")
      out.write("-----------------------------------------------------\n\n")
      out.flush()

      url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
      output4 = self.__curl_url(url4, self.QUERY, out, err)
      ret_tuple = self.validateQueryFiveHundredOrMore(output4, out, err)
      if not ret_tuple[0]:
        self.query_url_warn = True
        out.write(self.query_url + "501 - WARNING " + ret_tuple[1] + "\n\n")
      else:
        out.write(self.query_url + "501 - PASS\n\n")
      out.write("-----------------------------------------------------\n\n\n")
      out.flush()

      out.write("VALIDATING QUERY ... ")
      if self.query_url_passed:
        out.write("PASS")
        self.benchmarker.report_verify_results(self, self.QUERY, 'pass')
        if self.query_url_warn:
          out.write(" (with warnings)")
          self.benchmarker.report_verify_results(self, self.QUERY, 'warn')
        out.write("\n\n")
      else:
        out.write("\nFAIL " + ret_tuple[1] + "\n\n")
        self.benchmarker.report_verify_results(self, self.QUERY, 'fail')
        result = False
      out.flush()

    # Fortune
    if self.runTests[self.FORTUNE]:
      out.write(header("VERIFYING FORTUNE (%s)" % self.fortune_url))
      out.flush()
      url = self.benchmarker.generate_url(self.fortune_url, self.port)
      output = self.__curl_url(url, self.FORTUNE, out, err)
      out.write("VALIDATING FORTUNE ... ")
      ret_tuple = self.validateFortune(output, out, err)
      if ret_tuple[0]:
        self.fortune_url_passed = True
        out.write("PASS\n\n")
        self.benchmarker.report_verify_results(self, self.FORTUNE, 'pass')
      else:
        self.fortune_url_passed = False
        out.write("\nFAIL " + ret_tuple[1] + "\n\n")
        self.benchmarker.report_verify_results(self, self.FORTUNE, 'fail')
        result = False
      out.flush()

    # Update
    if self.runTests[self.UPDATE]:
      out.write(header("VERIFYING UPDATE (%s)" % self.update_url))
      out.flush()
      url = self.benchmarker.generate_url(self.update_url + "2", self.port)
      output = self.__curl_url(url, self.UPDATE, out, err)
      out.write("VALIDATING UPDATE ... ")
      ret_tuple = self.validateUpdate(output, out, err)
      if ret_tuple[0]:
        self.update_url_passed = True
        out.write("PASS\n\n")
        self.benchmarker.report_verify_results(self, self.UPDATE, 'pass')
      else:
        self.update_url_passed = False
        out.write("\nFAIL " + ret_tuple[1] + "\n\n")
        self.benchmarker.report_verify_results(self, self.UPDATE, 'fail')
        result = False
      out.flush()

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      out.write(header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
      out.flush()
      url = self.benchmarker.generate_url(self.plaintext_url, self.port)
      output = self.__curl_url(url, self.PLAINTEXT, out, err)
      out.write("VALIDATING PLAINTEXT ... ")
      ret_tuple = self.validatePlaintext(output, out, err)
      if ret_tuple[0]:
        self.plaintext_url_passed = True
        out.write("PASS\n\n")
        self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'pass')
      else:
        self.plaintext_url_passed = False
        out.write("\nFAIL\n\n" + ret_tuple[1] + "\n\n")
        self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'fail')
        result = False
      out.flush()

    return result
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):
    def benchmark_type(test_type):
      out.write("BENCHMARKING %s ... " % test_type.upper())
      test = self.runTests[test_type]
      output_file = self.benchmarker.output_file(self.name, test_type)
      if not os.path.exists(output_file):
        # Open to create the empty file
        with open(output_file, 'w'):
          pass
      if test.passed:
        if test.requires_db:
          remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
        else:
          remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)
        self.__begin_logging(test_type)
        self.__run_benchmark(remote_script, output_file, err)
        self.__end_logging()
      results = self.__parse_test(test_type)
      print "Benchmark results:"
      pprint(results)
      self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
      out.write("Complete\n")
      out.flush()

    # JSON
    if self.runTests[self.JSON]:
      try:
        out.write("BENCHMARKING JSON ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.JSON)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.json_url_passed:
          remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
          self.__begin_logging(self.JSON)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.JSON)
        print results
        self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # DB
    if self.runTests[self.DB]:
      try:
        out.write("BENCHMARKING DB ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.DB)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.db_url_passed:
          self.benchmarker.report_verify_results(self, self.DB, 'pass')
          remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
          self.__begin_logging(self.DB)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.DB)
        self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
        out.write("Complete\n")
      except AttributeError:
        pass

    # Query
    if self.runTests[self.QUERY]:
      try:
        out.write("BENCHMARKING Query ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.QUERY)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.query_url_passed:
          remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
          self.__begin_logging(self.QUERY)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.QUERY)
        self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Fortune
    if self.runTests[self.FORTUNE]:
      try:
        out.write("BENCHMARKING Fortune ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.fortune_url_passed:
          remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
          self.__begin_logging(self.FORTUNE)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.FORTUNE)
        self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Update
    if self.runTests[self.UPDATE]:
      try:
        out.write("BENCHMARKING Update ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.UPDATE)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.update_url_passed:
          remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
          self.__begin_logging(self.UPDATE)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.UPDATE)
        self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        pass

    # Plaintext
    if self.runTests[self.PLAINTEXT]:
      try:
        out.write("BENCHMARKING Plaintext ... ")
        out.flush()
        results = None
        output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
        if not os.path.exists(output_file):
          # Simply opening the file in write mode should create the empty file.
          with open(output_file, 'w'):
            pass
        if self.plaintext_url_passed:
          remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256, 1024, 4096, 16384], pipeline="16")
          self.__begin_logging(self.PLAINTEXT)
          self.__run_benchmark(remote_script, output_file, err)
          self.__end_logging()
        results = self.__parse_test(self.PLAINTEXT)
        self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
        out.write("Complete\n")
        out.flush()
      except AttributeError:
        traceback.print_exc()
        pass
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    # JSON
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
      results = self.__parse_test(self.JSON)
      self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])

    # DB
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
      results = self.__parse_test(self.DB)
      self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])

    # Query
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
      results = self.__parse_test(self.QUERY)
      self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])

    # Fortune
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
      results = self.__parse_test(self.FORTUNE)
      self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])

    # Update
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
      results = self.__parse_test(self.UPDATE)
      self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])

    # Plaintext
    if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
      results = self.__parse_test(self.PLAINTEXT)
      self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  ############################################################
  # End parse_all
  ############################################################
  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      stats = []
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:
            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue
            if not is_warmup:
              if rawData is None:
                rawData = dict()
                results['results'].append(rawData)

              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)

              # search for weighttp data such as succeeded and failed.
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*(?:us|ms|s|m|%))", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]

              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]

              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m is not None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m is not None:
                  rawData['5xx'] = int(m.group(1))

              if "STARTTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["startTime"] = int(m.group(0))
              if "ENDTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["endTime"] = int(m.group(0))
                test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
                # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
                stats.append(test_stats)
      with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
        json.dump(stats, stats_file)
      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __run_benchmark(script, output_file)
  # Runs a single benchmark using the script, which is a bash
  # template that uses wrk to run the test. All the results
  # are written to the output_file.
  ############################################################
  def __run_benchmark(self, script, output_file, err):
    with open(output_file, 'w') as raw_file:
      p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
      p.communicate(script)
      err.flush()
  ############################################################
  # End __run_benchmark
  ############################################################
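  # Note that the generated script never touches the local filesystem: it is
  # piped via stdin (p.communicate(script)) to the shell that
  # client_ssh_string opens on the load-generation machine, and wrk's stdout
  # flows back through that session into output_file.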
  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
    if len(intervals) == 0:
      intervals = self.benchmarker.concurrency_levels
    headers = self.__get_request_headers(accept_header)
    return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=pipeline)
  ############################################################
  # End __generate_concurrency_script
  ############################################################
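  # Hypothetical call: __generate_concurrency_script("/json", 8080, self.accept_json)
  # fills in concurrency_template with the benchmarker's concurrency_levels
  # and returns a complete bash script, ready to be handed to __run_benchmark
  # above.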
  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.__get_request_headers(accept_header)
    return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
      max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
      interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # End __generate_query_script
  ############################################################

  ############################################################
  # __get_request_headers(accept_header)
  # Generates the complete HTTP header string
  ############################################################
  def __get_request_headers(self, accept_header):
    return self.headers_template.format(accept=accept_header)
  ############################################################
  # End __get_request_headers
  ############################################################
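  # For example, an accept_header of "Accept: application/json" (the shape
  # self.accept_json is assumed to take) expands headers_template to:
  #   -H 'Host: localhost' -H 'Accept: application/json' -H 'Connection: keep-alive'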
  ############################################################
  # __curl_url
  # Dump HTTP response and headers. Throw exception if there
  # is an HTTP error.
  ############################################################
  def __curl_url(self, url, testType, out, err):
    output = None
    try:
      # Use -m 15 to make curl stop trying after 15sec.
      # Use -i to output response with headers.
      # Don't use -f so that the HTTP response code is ignored.
      # Use -sS to hide progress bar, but show errors.
      subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
      # HTTP output may not end in a newline, so add that here.
      out.write("\n\n")
      out.flush()
      err.flush()
      # We need to get the response body from curl and return it.
      p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
      output = p.communicate()
    except:
      pass
    if output:
      # We have the response body - return it
      return output[0]
  ##############################################################
  # End __curl_url
  ##############################################################
  def requires_database(self):
    '''Returns True/False if this test requires a database'''
    return any(tobj.requires_db for (ttype, tobj) in self.runTests.iteritems())
  ############################################################
  # __begin_logging
  # Starts a dstat subprocess to monitor resource usage, to be
  # synced with the client's time
  # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  ############################################################
  def __begin_logging(self, test_name):
    output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_name))
    dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
      --udp --unix --vm --disk-util --rpc --rpcd --output {output_file}".format(output_file=output_file)
    cmd = shlex.split(dstat_string)
    dev_null = open(os.devnull, "w")
    self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
  ##############################################################
  # End __begin_logging
  ##############################################################
  ##############################################################
  # Begin __end_logging
  # Stops the dstat logger process and blocks until shutdown is
  # complete.
  ##############################################################
  def __end_logging(self):
    self.subprocess_handle.terminate()
    self.subprocess_handle.communicate()
  ##############################################################
  # End __end_logging
  ##############################################################
  ##############################################################
  # Begin __parse_stats
  # For each test type, process all the statistics, and return a
  # multi-layered dictionary that has a structure as follows:
  # (timestamp)
  # | (main header) - group that the stat is in
  # | | (sub header) - title of the stat
  # | | | (stat) - the stat itself, usually a floating point number
  ##############################################################
  def __parse_stats(self, test_type, start_time, end_time, interval):
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
      # dstat doesn't output a completely compliant CSV file - we need to strip the header
      while stats.next() != "\n":
        pass
      stats_reader = csv.reader(stats)
      main_header = stats_reader.next()
      sub_header = stats_reader.next()
      time_row = sub_header.index("epoch")
      int_counter = 0
      for row in stats_reader:
        time = float(row[time_row])
        int_counter += 1
        if time < start_time:
          continue
        elif time > end_time:
          return stats_dict
        if int_counter % interval != 0:
          continue
        row_dict = dict()
        for nextheader in main_header:
          if nextheader != "":
            row_dict[nextheader] = dict()
        header = ""
        for item_num, column in enumerate(row):
          if len(main_header[item_num]) != 0:
            header = main_header[item_num]
          # all the stats are numbers, so we want to make sure that they stay that way in json
          row_dict[header][sub_header[item_num]] = float(column)
        stats_dict[time] = row_dict
    return stats_dict
  ##############################################################
  # End __parse_stats
  ##############################################################
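  # Sketch of the returned structure (exact column names depend on the dstat
  # version in use):
  #   { 1399405603.0: { "total cpu usage": { "usr": 1.0, "idl": 99.0, ... },
  #                     "memory usage": { "used": 1234567.0, ... },
  #                     "net/total": { "recv": 0.0, "send": 0.0 } } }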
  def __getattr__(self, name):
    """For backwards compatibility, we used to pass benchmarker
    as the argument to the setup.py files"""
    try:
      x = getattr(self.benchmarker, name)
    except AttributeError:
      print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
      print "This is probably a bug"
      raise
    return x
  ##############################################################
  # Begin __calculate_average_stats
  # We have a large amount of raw data for the statistics that
  # may be useful for the stats nerds, but most people care about
  # a couple of numbers. For now, we're only going to supply:
  # * Average CPU
  # * Average Memory
  # * Total network use
  # * Total disk use
  # More may be added in the future. If they are, please update
  # the above list.
  # Note: raw_stats comes directly from the __parse_stats method.
  # Recall that it consists of a dictionary of timestamps, each
  # of which contains a dictionary of stat categories, which in
  # turn contain a dictionary of stats.
  ##############################################################
  def __calculate_average_stats(self, raw_stats):
    raw_stat_collection = dict()
    for timestamp, time_dict in raw_stats.items():
      for main_header, sub_headers in time_dict.items():
        item_to_append = None
        if 'cpu' in main_header:
          # We want to take the idl stat and subtract it from 100
          # to get the percentage of time the CPU is NOT idle
          item_to_append = 100.0 - sub_headers['idl']
        elif main_header == 'memory usage':
          item_to_append = sub_headers['used']
        elif 'net' in main_header:
          # Network stats have two parts - receive and send. We'll
          # use a tuple of the form (receive, send)
          item_to_append = (sub_headers['recv'], sub_headers['send'])
        elif 'dsk' in main_header or 'io' in main_header:
          # Similar to network, except our tuple looks like (read, write)
          item_to_append = (sub_headers['read'], sub_headers['writ'])
        if item_to_append is not None:
          if main_header not in raw_stat_collection:
            raw_stat_collection[main_header] = list()
          raw_stat_collection[main_header].append(item_to_append)
    # Simple helper to produce a human-readable size
    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    def sizeof_fmt(num):
      # We'll assume that any number we get is convertible to a float, just in case
      num = float(num)
      for x in ['bytes', 'KB', 'MB', 'GB']:
        if num < 1024.0 and num > -1024.0:
          return "%3.1f%s" % (num, x)
        num /= 1024.0
      return "%3.1f%s" % (num, 'TB')
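    # Usage examples:
    #   sizeof_fmt(123456)     -> "120.6KB"
    #   sizeof_fmt(1073741824) -> "1.0GB"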
    # Now we have our raw stats in a readable format - we need to
    # format them for display. We need a floating point sum, so the
    # built-in sum doesn't cut it; use math.fsum instead.
    display_stat_collection = dict()
    for header, values in raw_stat_collection.items():
      display_stat = None
      if 'cpu' in header:
        # CPU is a percentage, not a byte count
        display_stat = "%.1f%%" % (math.fsum(values) / len(values))
      elif header == 'memory usage':
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif 'net' in header:
        receive, send = zip(*values) # unzip
        display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
      else: # 'dsk' or 'io' in header
        read, write = zip(*values) # unzip
        display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
      display_stat_collection[header] = display_stat
    return display_stat_collection
  ##############################################################
  # End __calculate_average_stats
  ##############################################################
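  # Example of the display collection returned above (keys depend on
  # the dstat output; values are illustrative):
  #   {
  #     'total cpu usage': '4.5%',
  #     'memory usage': '1.5GB',
  #     'net/eth0': {'receive': '500.0MB', 'send': '350.0MB'},
  #     'dsk/total': {'read': '1.1GB', 'write': '2.3GB'}
  #   }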
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot

    # Setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
      self.install_root = "%s/pertest/%s" % (self.install_root, name)

    # Used in setup.py scripts for consistency with
    # the bash environment variables
    self.troot = self.directory
    self.iroot = self.install_root

    self.__dict__.update(args)

    # Ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      logging.warning("Please add an empty __init__.py file to directory %s", directory)
      open(os.path.join(directory, "__init__.py"), 'w').close()

    # Import the module (TODO - consider using sys.meta_path)
    # Note: You can see the log output if you really want to, but it's a *ton*
    dir_rel_to_fwroot = os.path.relpath(os.path.dirname(directory), self.fwroot)
    if dir_rel_to_fwroot != ".":
      sys.path.append("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
      logging.log(0, "Adding %s to import %s.%s", dir_rel_to_fwroot, os.path.basename(directory), self.setup_file)
      self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
      sys.path.remove("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
    else:
      logging.log(0, "Importing %s.%s", directory, self.setup_file)
      self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################
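# Construction sketch (hypothetical values; in practice parse_config
# below builds these objects from a parsed benchmark_config file):
#   test = FrameworkTest('gemini', 'frameworks/Java/gemini',
#                        benchmarker, runTests, test_keys)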
##########################################################################################
# Static methods
##########################################################################################
##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # The config object can specify multiple tests.
  # Loop over them and parse each into a FrameworkTest.
  for test in config['tests']:
    for test_name, test_keys in test.iteritems():
      # Prefix all test names with the framework name, except the 'default' test
      if test_name == 'default':
        test_name = config['framework']
      else:
        test_name = "%s-%s" % (config['framework'], test_name)

      # Ensure FrameworkTest.framework is available
      if 'framework' not in test_keys or not test_keys['framework']:
        test_keys['framework'] = config['framework']
      #if test_keys['framework'].lower() != config['framework'].lower():
      #  print Exception("benchmark_config for test %s is invalid - test framework '%s' must match benchmark_config framework '%s'" %
      #    (test_name, test_keys['framework'], config['framework']))

      # Confirm required keys are present
      # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
      required = ['language', 'webserver', 'classification', 'database', 'approach', 'orm', 'framework', 'os', 'database_os']
      if not all(key in test_keys for key in required):
        raise Exception("benchmark_config for test %s is invalid - missing required keys" % test_name)

      # Map each test type to a parsed FrameworkTestType object
      runTests = dict()
      for type_name, type_obj in benchmarker.types.iteritems():
        try:
          runTests[type_name] = type_obj.copy().parse(test_keys)
        except AttributeError:
          # This is quite common - most tests don't support all types.
          # Quietly log it and move on.
          logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
          pass

      # By passing the entire set of keys, each FrameworkTest will have a member for each key
      tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, test_keys))

  return tests
##############################################################
# End parse_config
##############################################################
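# Usage sketch (path and variable names are illustrative; assumes a JSON
# benchmark_config file and an existing Benchmarker instance):
#   import json
#   with open('frameworks/Java/gemini/benchmark_config') as f:
#     config = json.load(f)
#   tests = parse_config(config, 'frameworks/Java/gemini', benchmarker)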