framework_test.py 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. from setup.linux import setup_util
  3. import importlib
  4. import os
  5. import subprocess
  6. import time
  7. import re
  8. import pprint
  9. import sys
  10. import traceback
  11. import json
  12. import logging
  13. import csv
  14. import shlex
  15. import math
  16. from threading import Thread
  17. from threading import Event
  18. from utils import header
class FrameworkTest:
    ##########################################################################################
    # Class variables
    ##########################################################################################
    # wrk/curl header arguments shared by every benchmark run; {accept} is
    # substituted with one of the accept_* strings below.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    # Fuller, browser-like header set (cookies, user agent, language) used when
    # a more realistic request is wanted.
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"
    # Accept headers for the three response flavors under test.
    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    # Shell script template for concurrency-based tests: primer run, warmup run,
    # clock sync, then one timed wrk run per concurrency level in {interval}.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
    # Shell script template for query-count tests: same primer/warmup/sync
    # structure, but the loop varies the query count appended to {url}.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {interval}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
    ############################################################
    # Test Variables
    ############################################################
    # Names of the individual test types; also used as keys into
    # self.runTests and in reporting.
    JSON = "json"
    DB = "db"
    QUERY = "query"
    FORTUNE = "fortune"
    UPDATE = "update"
    PLAINTEXT = "plaintext"
  113. ##########################################################################################
  114. # Public Methods
  115. ##########################################################################################
  116. ############################################################
  117. # Validates the jsonString is a JSON object with a 'message'
  118. # key with the value "hello, world!" (case-insensitive).
  119. ############################################################
  120. def validateJson(self, jsonString, out, err):
  121. err_str = ""
  122. if jsonString is None or len(jsonString) == 0:
  123. err_str += "Empty Response"
  124. return (False, err_str)
  125. try:
  126. obj = {k.lower(): v for k,v in json.loads(jsonString).iteritems()}
  127. if "message" not in obj:
  128. err_str += "Expected key 'message' to be in JSON string "
  129. if obj["message"].lower() != "hello, world!":
  130. err_str += "Message was '{message}', should have been 'Hello, World!' ".format(message=obj["message"])
  131. except:
  132. err_str += "Got exception when trying to validate the JSON test: {exception}".format(exception=traceback.format_exc())
  133. return (True, ) if len(err_str) == 0 else (False, err_str)
  134. ############################################################
  135. # Validates the jsonString is a JSON object that has an "id"
  136. # and a "randomNumber" key, and that both keys map to
  137. # integers.
  138. ############################################################
  139. def validateDb(self, jsonString, out, err):
  140. err_str = ""
  141. if jsonString is None or len(jsonString) == 0:
  142. err_str += "Empty Response"
  143. return (False, err_str)
  144. try:
  145. obj = {k.lower(): v for k,v in json.loads(jsonString).iteritems()}
  146. # We are allowing the single-object array for the DB
  147. # test for now, but will likely remove this later.
  148. if type(obj) == list:
  149. obj = obj[0]
  150. if "id" not in obj or "randomnumber" not in obj:
  151. err_str += "Expected keys id and randomNumber to be in JSON string. "
  152. return (False, err_str)
  153. # This will error out of the value could not parsed to a
  154. # float (this will work with ints, but it will turn them
  155. # into their float equivalent; i.e. "123" => 123.0)
  156. id_ret_val = True
  157. try:
  158. if not isinstance(float(obj["id"]), float):
  159. id_ret_val=False
  160. except:
  161. id_ret_val=False
  162. if not id_ret_val:
  163. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  164. random_num_ret_val = True
  165. try:
  166. if not isinstance(float(obj["randomnumber"]), float):
  167. random_num_ret_val=False
  168. except:
  169. random_num_ret_val=False
  170. if not random_num_ret_val:
  171. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  172. except:
  173. err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
  174. return (True, ) if len(err_str) == 0 else (False, err_str)
  175. def validateDbStrict(self, jsonString, out, err):
  176. err_str = ""
  177. if jsonString is None or len(jsonString) == 0:
  178. err_str += "Empty Response "
  179. return (False, err_str)
  180. try:
  181. obj = {k.lower(): v for k,v in json.loads(jsonString).iteritems()}
  182. # This will error out of the value could not parsed to a
  183. # float (this will work with ints, but it will turn them
  184. # into their float equivalent; i.e. "123" => 123.0)
  185. id_ret_val = True
  186. try:
  187. if not isinstance(float(obj["id"]), float):
  188. id_ret_val=False
  189. except:
  190. id_ret_val=False
  191. if not id_ret_val:
  192. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  193. random_num_ret_val = True
  194. try:
  195. if not isinstance(float(obj["randomnumber"]), float):
  196. random_num_ret_val=False
  197. except:
  198. random_num_ret_val=False
  199. if not random_num_ret_val:
  200. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  201. return id_ret_val and random_num_ret_val
  202. except:
  203. err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
  204. return (True, ) if len(err_str) == 0 else (False, err_str)
  205. ############################################################
  206. # Validates the jsonString is an array with a length of
  207. # 2, that each entry in the array is a JSON object, that
  208. # each object has an "id" and a "randomNumber" key, and that
  209. # both keys map to integers.
  210. ############################################################
  211. def validateQuery(self, jsonString, out, err):
  212. err_str = ""
  213. if jsonString is None or len(jsonString) == 0:
  214. err_str += "Empty Response"
  215. return (False, err_str)
  216. try:
  217. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  218. if len(arr) != 2:
  219. err_str += "Expected array of length 2. Got length {length}. ".format(length=len(arr))
  220. for obj in arr:
  221. id_ret_val = True
  222. random_num_ret_val = True
  223. if "id" not in obj or "randomnumber" not in obj:
  224. err_str += "Expected keys id and randomNumber to be in JSON string. "
  225. break
  226. try:
  227. if not isinstance(float(obj["id"]), float):
  228. id_ret_val=False
  229. except:
  230. id_ret_val=False
  231. if not id_ret_val:
  232. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  233. try:
  234. if not isinstance(float(obj["randomnumber"]), float):
  235. random_num_ret_val=False
  236. except:
  237. random_num_ret_val=False
  238. if not random_num_ret_val:
  239. err_str += "Expected randomNumber to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  240. except:
  241. err_str += "Got exception when trying to validate the query test: {exception}".format(exception=traceback.format_exc())
  242. return (True, ) if len(err_str) == 0 else (False, err_str)
  243. ############################################################
  244. # Validates the jsonString is an array with a length of
  245. # 1, that each entry in the array is a JSON object, that
  246. # each object has an "id" and a "randomNumber" key, and that
  247. # both keys map to integers.
  248. ############################################################
  249. def validateQueryOneOrLess(self, jsonString, out, err):
  250. err_str = ""
  251. if jsonString is None or len(jsonString) == 0:
  252. err_str += "Empty Response"
  253. else:
  254. try:
  255. json_load = json.loads(jsonString)
  256. if not isinstance(json_load, list):
  257. err_str += "Expected JSON array, got {typeObj}. ".format(typeObj=type(json_load))
  258. if len(json_load) != 1:
  259. err_str += "Expected array of length 1. Got length {length}. ".format(length=len(json_load))
  260. obj = {k.lower(): v for k,v in json_load[0].iteritems()}
  261. id_ret_val = True
  262. random_num_ret_val = True
  263. if "id" not in obj or "randomnumber" not in obj:
  264. err_str += "Expected keys id and randomNumber to be in JSON string. "
  265. try:
  266. if not isinstance(float(obj["id"]), float):
  267. id_ret_val=False
  268. except:
  269. id_ret_val=False
  270. if not id_ret_val:
  271. err_str += "Expected id to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  272. try:
  273. if not isinstance(float(obj["randomnumber"]), float):
  274. random_num_ret_val=False
  275. except:
  276. random_num_ret_val=False
  277. if not random_num_ret_val:
  278. err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  279. except:
  280. err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
  281. return (True, ) if len(err_str) == 0 else (False, err_str)
  282. ############################################################
  283. # Validates the jsonString is an array with a length of
  284. # 500, that each entry in the array is a JSON object, that
  285. # each object has an "id" and a "randomNumber" key, and that
  286. # both keys map to integers.
  287. ############################################################
  288. def validateQueryFiveHundredOrMore(self, jsonString, out, err):
  289. err_str = ""
  290. if jsonString is None or len(jsonString) == 0:
  291. err_str += "Empty Response"
  292. return (False, err_str)
  293. try:
  294. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  295. if len(arr) != 500:
  296. err_str += "Expected array of length 500. Got length {length}. ".format(length=len(arr))
  297. return (False, err_str)
  298. for obj in arr:
  299. id_ret_val = True
  300. random_num_ret_val = True
  301. if "id" not in obj or "randomnumber" not in obj:
  302. err_str += "Expected keys id and randomNumber to be in JSON string. "
  303. break
  304. try:
  305. if not isinstance(float(obj["id"]), float):
  306. id_ret_val=False
  307. except:
  308. id_ret_val=False
  309. if not id_ret_val:
  310. err_str += "Expected id to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  311. try:
  312. if not isinstance(float(obj["randomnumber"]), float):
  313. random_num_ret_val=False
  314. except:
  315. random_num_ret_val=False
  316. if not random_num_ret_val:
  317. err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  318. except:
  319. err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
  320. return (True, ) if len(err_str) == 0 else (False, err_str)
  321. ############################################################
  322. # Parses the given HTML string and asks a FortuneHTMLParser
  323. # whether the parsed string is a valid fortune return.
  324. ############################################################
  325. def validateFortune(self, htmlString, out, err):
  326. err_str = ""
  327. if htmlString is None or len(htmlString) == 0:
  328. err_str += "Empty Response"
  329. return (False, err_str)
  330. try:
  331. parser = FortuneHTMLParser()
  332. parser.feed(htmlString)
  333. valid = parser.isValidFortune(out)
  334. return (valid, '' if valid else 'Did not pass validation')
  335. except:
  336. print "Got exception when trying to validate the fortune test: {exception} ".format(exception=traceback.format_exc())
  337. return (False, err_str)
  338. ############################################################
  339. # Validates the jsonString is an array with a length of
  340. # 2, that each entry in the array is a JSON object, that
  341. # each object has an "id" and a "randomNumber" key, and that
  342. # both keys map to integers.
  343. ############################################################
  344. def validateUpdate(self, jsonString, out, err):
  345. err_str = ""
  346. if jsonString is None or len(jsonString) == 0:
  347. err_str += "Empty Response"
  348. return (False, err_str)
  349. try:
  350. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  351. if len(arr) != 2:
  352. err_str += "Expected array of length 2. Got length {length}.\n".format(length=len(arr))
  353. for obj in arr:
  354. id_ret_val = True
  355. random_num_ret_val = True
  356. if "id" not in obj or "randomnumber" not in obj:
  357. err_str += "Expected keys id and randomNumber to be in JSON string.\n"
  358. return (False, err_str)
  359. try:
  360. if not isinstance(float(obj["id"]), float):
  361. id_ret_val=False
  362. except:
  363. id_ret_val=False
  364. if not id_ret_val:
  365. err_str += "Expected id to be type int or float, got '{rand}'.\n".format(rand=obj["randomnumber"])
  366. try:
  367. if not isinstance(float(obj["randomnumber"]), float):
  368. random_num_ret_val=False
  369. except:
  370. random_num_ret_val=False
  371. if not random_num_ret_val:
  372. err_str += "Expected randomNumber to be type int or float, got '{rand}'.\n".format(rand=obj["randomnumber"])
  373. except:
  374. err_str += "Got exception when trying to validate the update test: {exception}\n".format(exception=traceback.format_exc())
  375. return (True, ) if len(err_str) == 0 else (False, err_str)
  376. ############################################################
  377. #
  378. ############################################################
  379. def validatePlaintext(self, jsonString, out, err):
  380. err_str = ""
  381. if jsonString is None or len(jsonString) == 0:
  382. err_str += "Empty Response"
  383. return (False, err_str)
  384. try:
  385. if not jsonString.lower().strip() == "hello, world!":
  386. err_str += "Expected 'Hello, World!', got '{message}'.\n".format(message=jsonString.strip())
  387. except:
  388. err_str += "Got exception when trying to validate the plaintext test: {exception}\n".format(exception=traceback.format_exc())
  389. return (True, ) if len(err_str) == 0 else (False, err_str)
    ############################################################
    # start(benchmarker)
    # Start the test using its setup file
    ############################################################
    def start(self, out, err):
        """Run the test's setup-module start() and return its exit code (0 = ok)."""
        # Load profile for this installation
        profile="%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
            # Fall back to the framework-wide default profile.
            profile="$FWROOT/config/benchmark_profile"
        # Setup variables for TROOT and IROOT
        setup_util.replace_environ(config=profile,
            command='export TROOT=%s && export IROOT=%s' %
            (self.directory, self.install_root))
        # Because start can take so long, we print a dot to let the user know
        # we are working
        class ProgressPrinterThread(Thread):
            # Writes a status line to stderr every 20s until `event` is set.
            def __init__(self, event):
                Thread.__init__(self)
                self.stopped = event

            def run(self):
                # Event.wait(20) returns False on timeout, True once set —
                # so this loops every 20s and exits when stopFlag is set.
                while not self.stopped.wait(20):
                    sys.stderr.write("Waiting for start to return...\n")
        stopFlag = Event()
        thread = ProgressPrinterThread(stopFlag)
        thread.start()
        # Run the module start (inside parent of TROOT)
        #  - we use the parent as a historical accident - a lot of tests
        #    use subprocess's cwd argument already
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.troot))
        try:
            retcode = self.setup_module.start(self, out, err)
            # A start() that returns nothing is treated as success.
            if retcode == None:
                retcode = 0
        except Exception:
            retcode = 1
            # Indent the traceback so it reads as a block in the error log.
            st = traceback.format_exc()
            st = '\n'.join((4 * ' ') + x for x in st.splitlines())
            st = "Start exception:\n%s" % st
            logging.info(st)
            err.write(st + '\n')
        # Always restore the working directory, even after a failure.
        os.chdir(previousDir)
        # Stop the progress printer
        stopFlag.set()
        logging.info("Start completed, running %s", self.benchmarker.mode)
        return retcode
  438. ############################################################
  439. # End start
  440. ############################################################
    ############################################################
    # stop(benchmarker)
    # Stops the test using its setup file
    ############################################################
    def stop(self, out, err):
        """Run the test's setup-module stop() and return its exit code (0 = ok)."""
        # Load profile for this installation
        profile="%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
            # Fall back to the framework-wide default profile.
            profile="$FWROOT/config/benchmark_profile"
        # Export TROOT/IROOT so the setup module sees them.
        setup_util.replace_environ(config=profile,
            command='export TROOT=%s && export IROOT=%s' %
            (self.directory, self.install_root))
        # Run the module stop (inside parent of TROOT)
        #  - we use the parent as a historical accident - a lot of tests
        #    use subprocess's cwd argument already
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.troot))
        try:
            retcode = self.setup_module.stop(out, err)
            # A stop() that returns nothing is treated as success.
            if retcode == None:
                retcode = 0
        except Exception:
            retcode = 1
            # Indent the traceback so it reads as a block in the error log.
            st = traceback.format_exc()
            st = '\n'.join((4 * ' ') + x for x in st.splitlines())
            st = "Stop exception:\n%s\n" % st
            logging.info(st)
            err.write(st + '\n')
        # Always restore the working directory, even after a failure.
        os.chdir(previousDir)
        # Give processes sent a SIGTERM a moment to shut down gracefully
        time.sleep(5)
        return retcode
  475. ############################################################
  476. # End stop
  477. ############################################################
  478. ############################################################
  479. # verify_urls
  480. # Verifys each of the URLs for this test. THis will sinply
  481. # curl the URL and check for it's return status.
  482. # For each url, a flag will be set on this object for whether
  483. # or not it passed
  484. # Returns True if all verifications succeeded
  485. ############################################################
  486. def verify_urls(self, out, err):
  487. result = True
  488. # JSON
  489. if self.runTests[self.JSON]:
  490. out.write(header("VERIFYING JSON (%s)" % self.json_url))
  491. out.flush()
  492. url = self.benchmarker.generate_url(self.json_url, self.port)
  493. output = self.__curl_url(url, self.JSON, out, err)
  494. out.write("VALIDATING JSON ... ")
  495. ret_tuple = self.validateJson(output, out, err)
  496. if ret_tuple[0]:
  497. self.json_url_passed = True
  498. out.write("PASS\n\n")
  499. self.benchmarker.report_verify_results(self, self.JSON, 'pass')
  500. else:
  501. self.json_url_passed = False
  502. out.write("\nFAIL" + ret_tuple[1] + "\n\n")
  503. self.benchmarker.report_verify_results(self, self.JSON, 'fail')
  504. result = False
  505. out.flush()
  506. # DB
  507. if self.runTests[self.DB]:
  508. out.write(header("VERIFYING DB (%s)" % self.db_url))
  509. out.flush()
  510. url = self.benchmarker.generate_url(self.db_url, self.port)
  511. output = self.__curl_url(url, self.DB, out, err)
  512. validate_ret_tuple = self.validateDb(output, out, err)
  513. validate_strict_ret_tuple = self.validateDbStrict(output, out, err)
  514. if validate_ret_tuple[0]:
  515. self.db_url_passed = True
  516. else:
  517. self.db_url_passed = False
  518. if validate_strict_ret_tuple:
  519. self.db_url_warn = False
  520. else:
  521. self.db_url_warn = True
  522. out.write("VALIDATING DB ... ")
  523. if self.db_url_passed:
  524. out.write("PASS")
  525. self.benchmarker.report_verify_results(self, self.DB, 'pass')
  526. if self.db_url_warn:
  527. out.write(" (with warnings) " + validate_strict_ret_tuple[1])
  528. self.benchmarker.report_verify_results(self, self.DB, 'warn')
  529. out.write("\n\n")
  530. else:
  531. self.benchmarker.report_verify_results(self, self.DB, 'fail')
  532. out.write("\nFAIL" + validate_ret_tuple[1])
  533. result = False
  534. out.flush()
  535. # Query
  536. if self.runTests[self.QUERY]:
  537. out.write(header("VERIFYING QUERY (%s)" % self.query_url+"2"))
  538. out.flush()
  539. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  540. output = self.__curl_url(url, self.QUERY, out, err)
  541. ret_tuple = self.validateQuery(output, out, err)
  542. if ret_tuple[0]:
  543. self.query_url_passed = True
  544. out.write(self.query_url + "2 - PASS\n\n")
  545. else:
  546. self.query_url_passed = False
  547. out.write(self.query_url + "2 - FAIL " + ret_tuple[1] + "\n\n")
  548. out.write("-----------------------------------------------------\n\n")
  549. out.flush()
  550. self.query_url_warn = False
  551. url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
  552. output2 = self.__curl_url(url2, self.QUERY, out, err)
  553. ret_tuple = self.validateQueryOneOrLess(output2, out, err)
  554. if not ret_tuple[0]:
  555. self.query_url_warn = True
  556. out.write(self.query_url + "0 - WARNING " + ret_tuple[1] + "\n\n")
  557. else:
  558. out.write(self.query_url + "0 - PASS\n\n")
  559. out.write("-----------------------------------------------------\n\n")
  560. out.flush()
  561. url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
  562. output3 = self.__curl_url(url3, self.QUERY, out, err)
  563. ret_tuple = self.validateQueryOneOrLess(output3, out, err)
  564. if not ret_tuple[0]:
  565. self.query_url_warn = True
  566. out.write(self.query_url + "foo - WARNING " + ret_tuple[1] + "\n\n")
  567. else:
  568. out.write(self.query_url + "foo - PASS\n\n")
  569. out.write("-----------------------------------------------------\n\n")
  570. out.flush()
  571. url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
  572. output4 = self.__curl_url(url4, self.QUERY, out, err)
  573. ret_tuple = self.validateQueryFiveHundredOrMore(output4, out, err)
  574. if not ret_tuple[0]:
  575. self.query_url_warn = True
  576. out.write(self.query_url + "501 - WARNING " + ret_tuple[1] + "\n\n")
  577. else:
  578. out.write(self.query_url + "501 - PASS\n\n")
  579. out.write("-----------------------------------------------------\n\n\n")
  580. out.flush()
  581. out.write("VALIDATING QUERY ... ")
  582. if self.query_url_passed:
  583. out.write("PASS")
  584. self.benchmarker.report_verify_results(self, self.QUERY, 'pass')
  585. if self.query_url_warn:
  586. out.write(" (with warnings)")
  587. self.benchmarker.report_verify_results(self, self.QUERY, 'warn')
  588. out.write("\n\n")
  589. else:
  590. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  591. self.benchmarker.report_verify_results(self, self.QUERY, 'fail')
  592. result = False
  593. out.flush()
  594. # Fortune
  595. if self.runTests[self.FORTUNE]:
  596. out.write(header("VERIFYING FORTUNE (%s)" % self.fortune_url))
  597. out.flush()
  598. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  599. output = self.__curl_url(url, self.FORTUNE, out, err)
  600. out.write("VALIDATING FORTUNE ... ")
  601. ret_tuple = self.validateFortune(output, out, err)
  602. if ret_tuple[0]:
  603. self.fortune_url_passed = True
  604. out.write("PASS\n\n")
  605. self.benchmarker.report_verify_results(self, self.FORTUNE, 'pass')
  606. else:
  607. self.fortune_url_passed = False
  608. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  609. self.benchmarker.report_verify_results(self, self.FORTUNE, 'fail')
  610. result = False
  611. out.flush()
  612. # Update
  613. if self.runTests[self.UPDATE]:
  614. out.write(header("VERIFYING UPDATE (%s)" % self.update_url))
  615. out.flush()
  616. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  617. output = self.__curl_url(url, self.UPDATE, out, err)
  618. out.write("VALIDATING UPDATE ... ")
  619. ret_tuple = self.validateUpdate(output, out, err)
  620. if ret_tuple[0]:
  621. self.update_url_passed = True
  622. out.write("PASS\n\n")
  623. self.benchmarker.report_verify_results(self, self.UPDATE, 'pass')
  624. else:
  625. self.update_url_passed = False
  626. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  627. self.benchmarker.report_verify_results(self, self.UPDATE, 'fail')
  628. result = False
  629. out.flush()
  630. # plaintext
  631. if self.runTests[self.PLAINTEXT]:
  632. out.write(header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
  633. out.flush()
  634. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  635. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  636. out.write("VALIDATING PLAINTEXT ... ")
  637. ret_tuple = self.validatePlaintext(output, out, err)
  638. if ret_tuple[0]:
  639. self.plaintext_url_passed = True
  640. out.write("PASS\n\n")
  641. self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'pass')
  642. else:
  643. self.plaintext_url_passed = False
  644. out.write("\nFAIL\n\n" + ret_tuple[1] + "\n\n")
  645. self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'fail')
  646. result = False
  647. out.flush()
  648. return result
  649. ############################################################
  650. # End verify_urls
  651. ############################################################
  652. ############################################################
  653. # contains_type(type)
  654. # true if this test contains an implementation of the given
  655. # test type (json, db, etc.)
  656. ############################################################
  657. def contains_type(self, type):
  658. try:
  659. if type == self.JSON and self.json_url is not None:
  660. return True
  661. if type == self.DB and self.db_url is not None:
  662. return True
  663. if type == self.QUERY and self.query_url is not None:
  664. return True
  665. if type == self.FORTUNE and self.fortune_url is not None:
  666. return True
  667. if type == self.UPDATE and self.update_url is not None:
  668. return True
  669. if type == self.PLAINTEXT and self.plaintext_url is not None:
  670. return True
  671. except AttributeError:
  672. pass
  673. return False
  674. ############################################################
  675. # End stop
  676. ############################################################
  677. ############################################################
  678. # benchmark
  679. # Runs the benchmark for each type of test that it implements
  680. # JSON/DB/Query.
  681. ############################################################
  682. def benchmark(self, out, err):
  683. # JSON
  684. if self.runTests[self.JSON]:
  685. try:
  686. out.write("BENCHMARKING JSON ... ")
  687. out.flush()
  688. results = None
  689. output_file = self.benchmarker.output_file(self.name, self.JSON)
  690. if not os.path.exists(output_file):
  691. with open(output_file, 'w'):
  692. # Simply opening the file in write mode should create the empty file.
  693. pass
  694. if self.json_url_passed:
  695. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  696. self.__begin_logging(self.JSON)
  697. self.__run_benchmark(remote_script, output_file, err)
  698. self.__end_logging()
  699. results = self.__parse_test(self.JSON)
  700. print results
  701. self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
  702. out.write( "Complete\n" )
  703. out.flush()
  704. except AttributeError:
  705. pass
  706. # DB
  707. if self.runTests[self.DB]:
  708. try:
  709. out.write("BENCHMARKING DB ... ")
  710. out.flush()
  711. results = None
  712. output_file = self.benchmarker.output_file(self.name, self.DB)
  713. if not os.path.exists(output_file):
  714. with open(output_file, 'w'):
  715. # Simply opening the file in write mode should create the empty file.
  716. pass
  717. if self.db_url_passed:
  718. self.benchmarker.report_verify_results(self, self.DB, 'pass')
  719. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  720. self.__begin_logging(self.DB)
  721. self.__run_benchmark(remote_script, output_file, err)
  722. self.__end_logging()
  723. results = self.__parse_test(self.DB)
  724. self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
  725. out.write( "Complete\n" )
  726. except AttributeError:
  727. pass
  728. # Query
  729. if self.runTests[self.QUERY]:
  730. try:
  731. out.write("BENCHMARKING Query ... ")
  732. out.flush()
  733. results = None
  734. output_file = self.benchmarker.output_file(self.name, self.QUERY)
  735. if not os.path.exists(output_file):
  736. with open(output_file, 'w'):
  737. # Simply opening the file in write mode should create the empty file.
  738. pass
  739. if self.query_url_passed:
  740. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  741. self.__begin_logging(self.QUERY)
  742. self.__run_benchmark(remote_script, output_file, err)
  743. self.__end_logging()
  744. results = self.__parse_test(self.QUERY)
  745. self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
  746. out.write( "Complete\n" )
  747. out.flush()
  748. except AttributeError:
  749. pass
  750. # fortune
  751. if self.runTests[self.FORTUNE]:
  752. try:
  753. out.write("BENCHMARKING Fortune ... ")
  754. out.flush()
  755. results = None
  756. output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
  757. if not os.path.exists(output_file):
  758. with open(output_file, 'w'):
  759. # Simply opening the file in write mode should create the empty file.
  760. pass
  761. if self.fortune_url_passed:
  762. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  763. self.__begin_logging(self.FORTUNE)
  764. self.__run_benchmark(remote_script, output_file, err)
  765. self.__end_logging()
  766. results = self.__parse_test(self.FORTUNE)
  767. self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
  768. out.write( "Complete\n" )
  769. out.flush()
  770. except AttributeError:
  771. pass
  772. # update
  773. if self.runTests[self.UPDATE]:
  774. try:
  775. out.write("BENCHMARKING Update ... ")
  776. out.flush()
  777. results = None
  778. output_file = self.benchmarker.output_file(self.name, self.UPDATE)
  779. if not os.path.exists(output_file):
  780. with open(output_file, 'w'):
  781. # Simply opening the file in write mode should create the empty file.
  782. pass
  783. if self.update_url_passed:
  784. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  785. self.__begin_logging(self.UPDATE)
  786. self.__run_benchmark(remote_script, output_file, err)
  787. self.__end_logging()
  788. results = self.__parse_test(self.UPDATE)
  789. self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
  790. out.write( "Complete\n" )
  791. out.flush()
  792. except AttributeError:
  793. pass
  794. # plaintext
  795. if self.runTests[self.PLAINTEXT]:
  796. try:
  797. out.write("BENCHMARKING Plaintext ... ")
  798. out.flush()
  799. results = None
  800. output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
  801. if not os.path.exists(output_file):
  802. with open(output_file, 'w'):
  803. # Simply opening the file in write mode should create the empty file.
  804. pass
  805. if self.plaintext_url_passed:
  806. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
  807. self.__begin_logging(self.PLAINTEXT)
  808. self.__run_benchmark(remote_script, output_file, err)
  809. self.__end_logging()
  810. results = self.__parse_test(self.PLAINTEXT)
  811. self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  812. out.write( "Complete\n" )
  813. out.flush()
  814. except AttributeError:
  815. traceback.print_exc()
  816. pass
  817. ############################################################
  818. # End benchmark
  819. ############################################################
  820. ############################################################
  821. # parse_all
  822. # Method meant to be run for a given timestamp
  823. ############################################################
  824. def parse_all(self):
  825. # JSON
  826. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  827. results = self.__parse_test(self.JSON)
  828. self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
  829. # DB
  830. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  831. results = self.__parse_test(self.DB)
  832. self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
  833. # Query
  834. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  835. results = self.__parse_test(self.QUERY)
  836. self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
  837. # Fortune
  838. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  839. results = self.__parse_test(self.FORTUNE)
  840. self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
  841. # Update
  842. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  843. results = self.__parse_test(self.UPDATE)
  844. self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
  845. # Plaintext
  846. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  847. results = self.__parse_test(self.PLAINTEXT)
  848. self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  849. ############################################################
  850. # End parse_all
  851. ############################################################
  852. ############################################################
  853. # __parse_test(test_type)
  854. ############################################################
  855. def __parse_test(self, test_type):
  856. try:
  857. results = dict()
  858. results['results'] = []
  859. stats = []
  860. if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
  861. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  862. is_warmup = True
  863. rawData = None
  864. for line in raw_data:
  865. if "Queries:" in line or "Concurrency:" in line:
  866. is_warmup = False
  867. rawData = None
  868. continue
  869. if "Warmup" in line or "Primer" in line:
  870. is_warmup = True
  871. continue
  872. if not is_warmup:
  873. if rawData == None:
  874. rawData = dict()
  875. results['results'].append(rawData)
  876. #if "Requests/sec:" in line:
  877. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  878. # rawData['reportedResults'] = m.group(1)
  879. # search for weighttp data such as succeeded and failed.
  880. if "Latency" in line:
  881. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  882. if len(m) == 4:
  883. rawData['latencyAvg'] = m[0]
  884. rawData['latencyStdev'] = m[1]
  885. rawData['latencyMax'] = m[2]
  886. # rawData['latencyStdevPercent'] = m[3]
  887. #if "Req/Sec" in line:
  888. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  889. # if len(m) == 4:
  890. # rawData['requestsAvg'] = m[0]
  891. # rawData['requestsStdev'] = m[1]
  892. # rawData['requestsMax'] = m[2]
  893. # rawData['requestsStdevPercent'] = m[3]
  894. #if "requests in" in line:
  895. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  896. # if m != None:
  897. # # parse out the raw time, which may be in minutes or seconds
  898. # raw_time = m.group(1)
  899. # if "ms" in raw_time:
  900. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  901. # elif "s" in raw_time:
  902. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  903. # elif "m" in raw_time:
  904. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  905. # elif "h" in raw_time:
  906. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  907. if "requests in" in line:
  908. m = re.search("([0-9]+) requests in", line)
  909. if m != None:
  910. rawData['totalRequests'] = int(m.group(1))
  911. if "Socket errors" in line:
  912. if "connect" in line:
  913. m = re.search("connect ([0-9]+)", line)
  914. rawData['connect'] = int(m.group(1))
  915. if "read" in line:
  916. m = re.search("read ([0-9]+)", line)
  917. rawData['read'] = int(m.group(1))
  918. if "write" in line:
  919. m = re.search("write ([0-9]+)", line)
  920. rawData['write'] = int(m.group(1))
  921. if "timeout" in line:
  922. m = re.search("timeout ([0-9]+)", line)
  923. rawData['timeout'] = int(m.group(1))
  924. if "Non-2xx" in line:
  925. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  926. if m != None:
  927. rawData['5xx'] = int(m.group(1))
  928. if "STARTTIME" in line:
  929. m = re.search("[0-9]+", line)
  930. rawData["startTime"] = int(m.group(0))
  931. if "ENDTIME" in line:
  932. m = re.search("[0-9]+", line)
  933. rawData["endTime"] = int(m.group(0))
  934. test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
  935. # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
  936. stats.append(test_stats)
  937. with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
  938. json.dump(stats, stats_file)
  939. return results
  940. except IOError:
  941. return None
  942. ############################################################
  943. # End benchmark
  944. ############################################################
  945. ##########################################################################################
  946. # Private Methods
  947. ##########################################################################################
  948. ############################################################
  949. # __run_benchmark(script, output_file)
  950. # Runs a single benchmark using the script which is a bash
  951. # template that uses weighttp to run the test. All the results
  952. # outputed to the output_file.
  953. ############################################################
  954. def __run_benchmark(self, script, output_file, err):
  955. with open(output_file, 'w') as raw_file:
  956. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  957. p.communicate(script)
  958. err.flush()
  959. ############################################################
  960. # End __run_benchmark
  961. ############################################################
  962. ############################################################
  963. # __generate_concurrency_script(url, port)
  964. # Generates the string containing the bash script that will
  965. # be run on the client to benchmark a single test. This
  966. # specifically works for the variable concurrency tests (JSON
  967. # and DB)
  968. ############################################################
  969. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  970. if len(intervals) == 0:
  971. intervals = self.benchmarker.concurrency_levels
  972. headers = self.__get_request_headers(accept_header)
  973. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  974. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  975. interval=" ".join("{}".format(item) for item in intervals),
  976. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  977. pipeline=pipeline)
  978. ############################################################
  979. # End __generate_concurrency_script
  980. ############################################################
  981. ############################################################
  982. # __generate_query_script(url, port)
  983. # Generates the string containing the bash script that will
  984. # be run on the client to benchmark a single test. This
  985. # specifically works for the variable query tests (Query)
  986. ############################################################
  987. def __generate_query_script(self, url, port, accept_header):
  988. headers = self.__get_request_headers(accept_header)
  989. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  990. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  991. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  992. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  993. ############################################################
  994. # End __generate_query_script
  995. ############################################################
  996. ############################################################
  997. # __get_request_headers(accept_header)
  998. # Generates the complete HTTP header string
  999. ############################################################
  1000. def __get_request_headers(self, accept_header):
  1001. return self.headers_template.format(accept=accept_header)
  1002. ############################################################
  1003. # End __format_request_headers
  1004. ############################################################
  1005. ############################################################
  1006. # __curl_url
  1007. # Dump HTTP response and headers. Throw exception if there
  1008. # is an HTTP error.
  1009. ############################################################
  1010. def __curl_url(self, url, testType, out, err):
  1011. output = None
  1012. try:
  1013. # Use -m 15 to make curl stop trying after 15sec.
  1014. # Use -i to output response with headers.
  1015. # Don't use -f so that the HTTP response code is ignored.
  1016. # Use --stderr - to redirect stderr to stdout so we get
  1017. # error output for sure in stdout.
  1018. # Use -sS to hide progress bar, but show errors.
  1019. subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
  1020. # HTTP output may not end in a newline, so add that here.
  1021. out.write( "\n\n" )
  1022. out.flush()
  1023. err.flush()
  1024. # We need to get the respond body from the curl and return it.
  1025. p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
  1026. output = p.communicate()
  1027. except:
  1028. pass
  1029. if output:
  1030. # We have the response body - return it
  1031. return output[0]
  1032. ##############################################################
  1033. # End __curl_url
  1034. ##############################################################
  1035. def requires_database(self):
  1036. """Returns True/False if this test requires a database"""
  1037. return (self.contains_type(self.FORTUNE) or
  1038. self.contains_type(self.DB) or
  1039. self.contains_type(self.QUERY) or
  1040. self.contains_type(self.UPDATE))
  1041. ############################################################
  1042. # __begin_logging
  1043. # Starts a thread to monitor the resource usage, to be synced with the client's time
  1044. # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  1045. ############################################################
  1046. def __begin_logging(self, test_name):
  1047. output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_name))
  1048. dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
  1049. --raw --socket --tcp --udp --unix --vm --disk-util \
  1050. --rpc --rpcd --output {output_file}".format(output_file=output_file)
  1051. cmd = shlex.split(dstat_string)
  1052. dev_null = open(os.devnull, "w")
  1053. self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
  1054. ##############################################################
  1055. # End __begin_logging
  1056. ##############################################################
  1057. ##############################################################
  1058. # Begin __end_logging
  1059. # Stops the logger thread and blocks until shutdown is complete.
  1060. ##############################################################
  1061. def __end_logging(self):
  1062. self.subprocess_handle.terminate()
  1063. self.subprocess_handle.communicate()
  1064. ##############################################################
  1065. # End __end_logging
  1066. ##############################################################
  1067. ##############################################################
  1068. # Begin __parse_stats
  1069. # For each test type, process all the statistics, and return a multi-layered dictionary
  1070. # that has a structure as follows:
  1071. # (timestamp)
  1072. # | (main header) - group that the stat is in
  1073. # | | (sub header) - title of the stat
  1074. # | | | (stat) - the stat itself, usually a floating point number
  1075. ##############################################################
def __parse_stats(self, test_type, start_time, end_time, interval):
    # Parse the dstat CSV for one test type into a nested dict:
    #   {timestamp: {main_header: {sub_header: float_value}}}
    # keeping only samples between start_time and end_time (epoch
    # seconds), thinned to every `interval`-th row.
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
        # dstat doesn't output a completely compliant CSV file - we need
        # to strip the preamble, which ends at the first blank line.
        # NOTE(review): Python 2 iterator protocol (.next()); raises
        # StopIteration if the file has no blank line - confirm inputs.
        while(stats.next() != "\n"):
            pass
        stats_reader = csv.reader(stats)
        # Two header rows: group names, then per-column stat names.
        main_header = stats_reader.next()
        sub_header = stats_reader.next()
        # Column index of the epoch timestamp within each data row.
        time_row = sub_header.index("epoch")
        int_counter = 0
        for row in stats_reader:
            time = float(row[time_row])
            int_counter+=1
            # Skip samples taken before the benchmark window opened.
            if time < start_time:
                continue
            elif time > end_time:
                # Past the benchmark window - done.
                return stats_dict
            # Thin the samples: keep only every `interval`-th row.
            if int_counter % interval != 0:
                continue
            row_dict = dict()
            for nextheader in main_header:
                if nextheader != "":
                    row_dict[nextheader] = dict()
            header = ""
            for item_num, column in enumerate(row):
                # main_header names only the first column of each group;
                # carry the group name forward for its remaining columns.
                if(len(main_header[item_num]) != 0):
                    header = main_header[item_num]
                row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json
            stats_dict[time] = row_dict
    return stats_dict
  1107. ##############################################################
  1108. # End __parse_stats
  1109. ##############################################################
  1110. def __getattr__(self, name):
  1111. """For backwards compatibility, we used to pass benchmarker
  1112. as the argument to the setup.py files"""
  1113. try:
  1114. x = getattr(self.benchmarker, name)
  1115. except AttributeError:
  1116. print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
  1117. print "This is probably a bug"
  1118. raise
  1119. return x
  1120. ##############################################################
  1121. # Begin __calculate_average_stats
  1122. # We have a large amount of raw data for the statistics that
  1123. # may be useful for the stats nerds, but most people care about
  1124. # a couple of numbers. For now, we're only going to supply:
  1125. # * Average CPU
  1126. # * Average Memory
  1127. # * Total network use
  1128. # * Total disk use
  1129. # More may be added in the future. If they are, please update
  1130. # the above list.
  1131. # Note: raw_stats is directly from the __parse_stats method.
  1132. # Recall that this consists of a dictionary of timestamps,
  1133. # each of which contain a dictionary of stat categories which
  1134. # contain a dictionary of stats
  1135. ##############################################################
  1136. def __calculate_average_stats(self, raw_stats):
  1137. raw_stat_collection = dict()
  1138. for timestamp, time_dict in raw_stats.items():
  1139. for main_header, sub_headers in time_dict.items():
  1140. item_to_append = None
  1141. if 'cpu' in main_header:
  1142. # We want to take the idl stat and subtract it from 100
  1143. # to get the time that the CPU is NOT idle.
  1144. item_to_append = sub_headers['idl'] - 100.0
  1145. elif main_header == 'memory usage':
  1146. item_to_append = sub_headers['used']
  1147. elif 'net' in main_header:
  1148. # Network stats have two parts - recieve and send. We'll use a tuple of
  1149. # style (recieve, send)
  1150. item_to_append = (sub_headers['recv'], sub_headers['send'])
  1151. elif 'dsk' or 'io' in main_header:
  1152. # Similar for network, except our tuple looks like (read, write)
  1153. item_to_append = (sub_headers['read'], sub_headers['writ'])
  1154. if item_to_append is not None:
  1155. if main_header not in raw_stat_collection:
  1156. raw_stat_collection[main_header] = list()
  1157. raw_stat_collection[main_header].append(item_to_append)
  1158. # Simple function to determine human readable size
  1159. # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
  1160. def sizeof_fmt(num):
  1161. # We'll assume that any number we get is convertable to a float, just in case
  1162. num = float(num)
  1163. for x in ['bytes','KB','MB','GB']:
  1164. if num < 1024.0 and num > -1024.0:
  1165. return "%3.1f%s" % (num, x)
  1166. num /= 1024.0
  1167. return "%3.1f%s" % (num, 'TB')
  1168. # Now we have our raw stats in a readable format - we need to format it for display
  1169. # We need a floating point sum, so the built in sum doesn't cut it
  1170. display_stat_collection = dict()
  1171. for header, values in raw_stat_collection.items():
  1172. display_stat = None
  1173. if 'cpu' in header:
  1174. display_stat = sizeof_fmt(math.fsum(values) / len(values))
  1175. elif main_header == 'memory usage':
  1176. display_stat = sizeof_fmt(math.fsum(values) / len(values))
  1177. elif 'net' in main_header:
  1178. receive, send = zip(*values) # unzip
  1179. display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
  1180. else: # if 'dsk' or 'io' in header:
  1181. read, write = zip(*values) # unzip
  1182. display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
  1183. display_stat_collection[header] = display_stat
  1184. return display_stat
  1185. ###########################################################################################
  1186. # End __calculate_average_stats
  1187. #########################################################################################
  1188. ##########################################################################################
  1189. # Constructor
  1190. ##########################################################################################
  1191. def __init__(self, name, directory, benchmarker, runTests, args):
  1192. self.name = name
  1193. self.directory = directory
  1194. self.benchmarker = benchmarker
  1195. self.runTests = runTests
  1196. self.fwroot = benchmarker.fwroot
  1197. # setup logging
  1198. logging.basicConfig(stream=sys.stderr, level=logging.INFO)
  1199. self.install_root="%s/%s" % (self.fwroot, "installs")
  1200. if benchmarker.install_strategy is 'pertest':
  1201. self.install_root="%s/pertest/%s" % (self.install_root, name)
  1202. # Used in setup.py scripts for consistency with
  1203. # the bash environment variables
  1204. self.troot = self.directory
  1205. self.iroot = self.install_root
  1206. self.__dict__.update(args)
  1207. # ensure directory has __init__.py file so that we can use it as a Python package
  1208. if not os.path.exists(os.path.join(directory, "__init__.py")):
  1209. logging.warning("Please add an empty __init__.py file to directory %s", directory)
  1210. open(os.path.join(directory, "__init__.py"), 'w').close()
  1211. # Import the module (TODO - consider using sys.meta_path)
  1212. # Note: You can see the log output if you really want to, but it's a *ton*
  1213. dir_rel_to_fwroot = os.path.relpath(os.path.dirname(directory), self.fwroot)
  1214. if dir_rel_to_fwroot != ".":
  1215. sys.path.append("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
  1216. logging.log(0, "Adding %s to import %s.%s", dir_rel_to_fwroot, os.path.basename(directory), self.setup_file)
  1217. self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  1218. sys.path.remove("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
  1219. else:
  1220. logging.log(0, "Importing %s.%s", directory, self.setup_file)
  1221. self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  1222. ############################################################
  1223. # End __init__
  1224. ############################################################
  1225. ############################################################
  1226. # End FrameworkTest
  1227. ############################################################
  1228. ##########################################################################################
  1229. # Static methods
  1230. ##########################################################################################
  1231. ##############################################################
  1232. # parse_config(config, directory, benchmarker)
  1233. # parses a config file and returns a list of FrameworkTest
  1234. # objects based on that config file.
  1235. ##############################################################
  1236. def parse_config(config, directory, benchmarker):
  1237. tests = []
  1238. # The config object can specify multiple tests
  1239. # Loop over them and parse each into a FrameworkTest
  1240. for test in config['tests']:
  1241. for test_name, test_keys in test.iteritems():
  1242. # Prefix all test names with framework except 'default' test
  1243. if test_name == 'default':
  1244. test_name = config['framework']
  1245. else:
  1246. test_name = "%s-%s" % (config['framework'], test_name)
  1247. # Ensure FrameworkTest.framework is available
  1248. if not test_keys['framework']:
  1249. test_keys['framework'] = config['framework']
  1250. #if test_keys['framework'].lower() != config['framework'].lower():
  1251. # print Exception("benchmark_config for test %s is invalid - test framework '%s' must match benchmark_config framework '%s'" %
  1252. # (test_name, test_keys['framework'], config['framework']))
  1253. # Confirm required keys are present
  1254. # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
  1255. required = ['language','webserver','classification','database','approach','orm','framework','os','database_os']
  1256. if not all (key in test_keys for key in required):
  1257. raise Exception("benchmark_config for test %s is invalid - missing required keys" % test_name)
  1258. # Map test type to either boolean False (e.g. don't run)
  1259. # or to a list of strings containing all the arguments
  1260. # needed by this test type
  1261. runTests = dict()
  1262. for test_type in benchmarker.types:
  1263. # Ensure all arguments required for this test are present
  1264. if all (arg in test_keys for arg in benchmarker.type_args[test_type]):
  1265. runTests[test_type] = [ test_keys[arg] for arg in benchmarker.type_args[test_type]]
  1266. else:
  1267. runTests[test_type] = False
  1268. # By passing the entire set of keys, each FrameworkTest will have a member for each key
  1269. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, test_keys))
  1270. return tests
  1271. ##############################################################
  1272. # End parse_config
  1273. ##############################################################