framework_test.py 59 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410
  1. from benchmark.fortune_html_parser import FortuneHTMLParser
  2. from setup.linux import setup_util
  3. from benchmark.test_types.framework_test_type import *
  4. import importlib
  5. import os
  6. import subprocess
  7. import time
  8. import re
  9. from pprint import pprint
  10. import sys
  11. import traceback
  12. import json
  13. import logging
  14. import csv
  15. import shlex
  16. import math
  17. from threading import Thread
  18. from threading import Event
  19. from utils import header
  20. class FrameworkTest:
  21. headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
  22. # Used for test types that do not require a database -
  23. # These tests are run at multiple concurrency levels
  24. concurrency_template = """
  25. echo ""
  26. echo "---------------------------------------------------------"
  27. echo " Running Primer {name}"
  28. echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
  29. echo "---------------------------------------------------------"
  30. echo ""
  31. {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
  32. sleep 5
  33. echo ""
  34. echo "---------------------------------------------------------"
  35. echo " Running Warmup {name}"
  36. echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
  37. echo "---------------------------------------------------------"
  38. echo ""
  39. {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
  40. sleep 5
  41. echo ""
  42. echo "---------------------------------------------------------"
  43. echo " Synchronizing time"
  44. echo "---------------------------------------------------------"
  45. echo ""
  46. ntpdate -s pool.ntp.org
  47. for c in {interval}
  48. do
  49. echo ""
  50. echo "---------------------------------------------------------"
  51. echo " Concurrency: $c for {name}"
  52. echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
  53. echo "---------------------------------------------------------"
  54. echo ""
  55. STARTTIME=$(date +"%s")
  56. {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
  57. echo "STARTTIME $STARTTIME"
  58. echo "ENDTIME $(date +"%s")"
  59. sleep 2
  60. done
  61. """
  62. # Used for test types that require a database -
  63. # These tests run at a static concurrency level and vary the size of
  64. # the query sent with each request
  65. query_template = """
  66. echo ""
  67. echo "---------------------------------------------------------"
  68. echo " Running Primer {name}"
  69. echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
  70. echo "---------------------------------------------------------"
  71. echo ""
  72. wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
  73. sleep 5
  74. echo ""
  75. echo "---------------------------------------------------------"
  76. echo " Running Warmup {name}"
  77. echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
  78. echo "---------------------------------------------------------"
  79. echo ""
  80. wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
  81. sleep 5
  82. echo ""
  83. echo "---------------------------------------------------------"
  84. echo " Synchronizing time"
  85. echo "---------------------------------------------------------"
  86. echo ""
  87. ntpdate -s pool.ntp.org
  88. for c in {interval}
  89. do
  90. echo ""
  91. echo "---------------------------------------------------------"
  92. echo " Queries: $c for {name}"
  93. echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
  94. echo "---------------------------------------------------------"
  95. echo ""
  96. STARTTIME=$(date +"%s")
  97. wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
  98. echo "STARTTIME $STARTTIME"
  99. echo "ENDTIME $(date +"%s")"
  100. sleep 2
  101. done
  102. """
  103. ############################################################
  104. # Test Variables
  105. ############################################################
  106. JSON = "json"
  107. DB = "db"
  108. QUERY = "query"
  109. FORTUNE = "fortune"
  110. UPDATE = "update"
  111. PLAINTEXT = "plaintext"
  112. ##########################################################################################
  113. # Public Methods
  114. ##########################################################################################
  115. ############################################################
  116. # Validates the jsonString is a JSON object that has an "id"
  117. # and a "randomNumber" key, and that both keys map to
  118. # integers.
  119. ############################################################
  120. def validateDb(self, jsonString, out, err):
  121. err_str = ""
  122. if jsonString is None or len(jsonString) == 0:
  123. err_str += "Empty Response"
  124. return (False, err_str)
  125. try:
  126. obj = {k.lower(): v for k,v in json.loads(jsonString).iteritems()}
  127. # We are allowing the single-object array for the DB
  128. # test for now, but will likely remove this later.
  129. if type(obj) == list:
  130. obj = obj[0]
  131. if "id" not in obj or "randomnumber" not in obj:
  132. err_str += "Expected keys id and randomNumber to be in JSON string. "
  133. return (False, err_str)
  134. # This will error out of the value could not parsed to a
  135. # float (this will work with ints, but it will turn them
  136. # into their float equivalent; i.e. "123" => 123.0)
  137. id_ret_val = True
  138. try:
  139. if not isinstance(float(obj["id"]), float):
  140. id_ret_val=False
  141. except:
  142. id_ret_val=False
  143. if not id_ret_val:
  144. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  145. random_num_ret_val = True
  146. try:
  147. if not isinstance(float(obj["randomnumber"]), float):
  148. random_num_ret_val=False
  149. except:
  150. random_num_ret_val=False
  151. if not random_num_ret_val:
  152. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  153. except:
  154. err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
  155. return (True, ) if len(err_str) == 0 else (False, err_str)
  156. def validateDbStrict(self, jsonString, out, err):
  157. err_str = ""
  158. if jsonString is None or len(jsonString) == 0:
  159. err_str += "Empty Response "
  160. return (False, err_str)
  161. try:
  162. obj = {k.lower(): v for k,v in json.loads(jsonString).iteritems()}
  163. # This will error out of the value could not parsed to a
  164. # float (this will work with ints, but it will turn them
  165. # into their float equivalent; i.e. "123" => 123.0)
  166. id_ret_val = True
  167. try:
  168. if not isinstance(float(obj["id"]), float):
  169. id_ret_val=False
  170. except:
  171. id_ret_val=False
  172. if not id_ret_val:
  173. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  174. random_num_ret_val = True
  175. try:
  176. if not isinstance(float(obj["randomnumber"]), float):
  177. random_num_ret_val=False
  178. except:
  179. random_num_ret_val=False
  180. if not random_num_ret_val:
  181. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  182. return id_ret_val and random_num_ret_val
  183. except:
  184. err_str += "Got exception when trying to validate the db test: {exception}".format(exception=traceback.format_exc())
  185. return (True, ) if len(err_str) == 0 else (False, err_str)
  186. ############################################################
  187. # Validates the jsonString is an array with a length of
  188. # 2, that each entry in the array is a JSON object, that
  189. # each object has an "id" and a "randomNumber" key, and that
  190. # both keys map to integers.
  191. ############################################################
  192. def validateQuery(self, jsonString, out, err):
  193. err_str = ""
  194. if jsonString is None or len(jsonString) == 0:
  195. err_str += "Empty Response"
  196. return (False, err_str)
  197. try:
  198. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  199. if len(arr) != 2:
  200. err_str += "Expected array of length 2. Got length {length}. ".format(length=len(arr))
  201. for obj in arr:
  202. id_ret_val = True
  203. random_num_ret_val = True
  204. if "id" not in obj or "randomnumber" not in obj:
  205. err_str += "Expected keys id and randomNumber to be in JSON string. "
  206. break
  207. try:
  208. if not isinstance(float(obj["id"]), float):
  209. id_ret_val=False
  210. except:
  211. id_ret_val=False
  212. if not id_ret_val:
  213. err_str += "Expected id to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  214. try:
  215. if not isinstance(float(obj["randomnumber"]), float):
  216. random_num_ret_val=False
  217. except:
  218. random_num_ret_val=False
  219. if not random_num_ret_val:
  220. err_str += "Expected randomNumber to be type int or float, got '{rand}' ".format(rand=obj["randomnumber"])
  221. except:
  222. err_str += "Got exception when trying to validate the query test: {exception}".format(exception=traceback.format_exc())
  223. return (True, ) if len(err_str) == 0 else (False, err_str)
  224. ############################################################
  225. # Validates the jsonString is an array with a length of
  226. # 1, that each entry in the array is a JSON object, that
  227. # each object has an "id" and a "randomNumber" key, and that
  228. # both keys map to integers.
  229. ############################################################
  230. def validateQueryOneOrLess(self, jsonString, out, err):
  231. err_str = ""
  232. if jsonString is None or len(jsonString) == 0:
  233. err_str += "Empty Response"
  234. else:
  235. try:
  236. json_load = json.loads(jsonString)
  237. if not isinstance(json_load, list):
  238. err_str += "Expected JSON array, got {typeObj}. ".format(typeObj=type(json_load))
  239. if len(json_load) != 1:
  240. err_str += "Expected array of length 1. Got length {length}. ".format(length=len(json_load))
  241. obj = {k.lower(): v for k,v in json_load[0].iteritems()}
  242. id_ret_val = True
  243. random_num_ret_val = True
  244. if "id" not in obj or "randomnumber" not in obj:
  245. err_str += "Expected keys id and randomNumber to be in JSON string. "
  246. try:
  247. if not isinstance(float(obj["id"]), float):
  248. id_ret_val=False
  249. except:
  250. id_ret_val=False
  251. if not id_ret_val:
  252. err_str += "Expected id to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  253. try:
  254. if not isinstance(float(obj["randomnumber"]), float):
  255. random_num_ret_val=False
  256. except:
  257. random_num_ret_val=False
  258. if not random_num_ret_val:
  259. err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  260. except:
  261. err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
  262. return (True, ) if len(err_str) == 0 else (False, err_str)
  263. ############################################################
  264. # Validates the jsonString is an array with a length of
  265. # 500, that each entry in the array is a JSON object, that
  266. # each object has an "id" and a "randomNumber" key, and that
  267. # both keys map to integers.
  268. ############################################################
  269. def validateQueryFiveHundredOrMore(self, jsonString, out, err):
  270. err_str = ""
  271. if jsonString is None or len(jsonString) == 0:
  272. err_str += "Empty Response"
  273. return (False, err_str)
  274. try:
  275. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  276. if len(arr) != 500:
  277. err_str += "Expected array of length 500. Got length {length}. ".format(length=len(arr))
  278. return (False, err_str)
  279. for obj in arr:
  280. id_ret_val = True
  281. random_num_ret_val = True
  282. if "id" not in obj or "randomnumber" not in obj:
  283. err_str += "Expected keys id and randomNumber to be in JSON string. "
  284. break
  285. try:
  286. if not isinstance(float(obj["id"]), float):
  287. id_ret_val=False
  288. except:
  289. id_ret_val=False
  290. if not id_ret_val:
  291. err_str += "Expected id to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  292. try:
  293. if not isinstance(float(obj["randomnumber"]), float):
  294. random_num_ret_val=False
  295. except:
  296. random_num_ret_val=False
  297. if not random_num_ret_val:
  298. err_str += "Expected randomNumber to be type int or float, got '{rand}'. ".format(rand=obj["randomnumber"])
  299. except:
  300. err_str += "Got exception when trying to validate the query test: {exception} ".format(exception=traceback.format_exc())
  301. return (True, ) if len(err_str) == 0 else (False, err_str)
  302. ############################################################
  303. # Parses the given HTML string and asks a FortuneHTMLParser
  304. # whether the parsed string is a valid fortune return.
  305. ############################################################
  306. def validateFortune(self, htmlString, out, err):
  307. err_str = ""
  308. if htmlString is None or len(htmlString) == 0:
  309. err_str += "Empty Response"
  310. return (False, err_str)
  311. try:
  312. parser = FortuneHTMLParser()
  313. parser.feed(htmlString)
  314. valid = parser.isValidFortune(out)
  315. return (valid, '' if valid else 'Did not pass validation')
  316. except:
  317. print "Got exception when trying to validate the fortune test: {exception} ".format(exception=traceback.format_exc())
  318. return (False, err_str)
  319. ############################################################
  320. # Validates the jsonString is an array with a length of
  321. # 2, that each entry in the array is a JSON object, that
  322. # each object has an "id" and a "randomNumber" key, and that
  323. # both keys map to integers.
  324. ############################################################
  325. def validateUpdate(self, jsonString, out, err):
  326. err_str = ""
  327. if jsonString is None or len(jsonString) == 0:
  328. err_str += "Empty Response"
  329. return (False, err_str)
  330. try:
  331. arr = [{k.lower(): v for k,v in d.iteritems()} for d in json.loads(jsonString)]
  332. if len(arr) != 2:
  333. err_str += "Expected array of length 2. Got length {length}.\n".format(length=len(arr))
  334. for obj in arr:
  335. id_ret_val = True
  336. random_num_ret_val = True
  337. if "id" not in obj or "randomnumber" not in obj:
  338. err_str += "Expected keys id and randomNumber to be in JSON string.\n"
  339. return (False, err_str)
  340. try:
  341. if not isinstance(float(obj["id"]), float):
  342. id_ret_val=False
  343. except:
  344. id_ret_val=False
  345. if not id_ret_val:
  346. err_str += "Expected id to be type int or float, got '{rand}'.\n".format(rand=obj["randomnumber"])
  347. try:
  348. if not isinstance(float(obj["randomnumber"]), float):
  349. random_num_ret_val=False
  350. except:
  351. random_num_ret_val=False
  352. if not random_num_ret_val:
  353. err_str += "Expected randomNumber to be type int or float, got '{rand}'.\n".format(rand=obj["randomnumber"])
  354. except:
  355. err_str += "Got exception when trying to validate the update test: {exception}\n".format(exception=traceback.format_exc())
  356. return (True, ) if len(err_str) == 0 else (False, err_str)
  357. ############################################################
  358. #
  359. ############################################################
  360. def validatePlaintext(self, jsonString, out, err):
  361. err_str = ""
  362. if jsonString is None or len(jsonString) == 0:
  363. err_str += "Empty Response"
  364. return (False, err_str)
  365. try:
  366. if not jsonString.lower().strip() == "hello, world!":
  367. err_str += "Expected 'Hello, World!', got '{message}'.\n".format(message=jsonString.strip())
  368. except:
  369. err_str += "Got exception when trying to validate the plaintext test: {exception}\n".format(exception=traceback.format_exc())
  370. return (True, ) if len(err_str) == 0 else (False, err_str)
  371. ############################################################
  372. # start(benchmarker)
  373. # Start the test using it's setup file
  374. ############################################################
  375. def start(self, out, err):
  376. # Load profile for this installation
  377. profile="%s/bash_profile.sh" % self.directory
  378. if not os.path.exists(profile):
  379. logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
  380. profile="$FWROOT/config/benchmark_profile"
  381. # Setup variables for TROOT and IROOT
  382. setup_util.replace_environ(config=profile,
  383. command='export TROOT=%s && export IROOT=%s' %
  384. (self.directory, self.install_root))
  385. # Because start can take so long, we print a dot to let the user know
  386. # we are working
  387. class ProgressPrinterThread(Thread):
  388. def __init__(self, event):
  389. Thread.__init__(self)
  390. self.stopped = event
  391. def run(self):
  392. while not self.stopped.wait(20):
  393. sys.stderr.write("Waiting for start to return...\n")
  394. stopFlag = Event()
  395. thread = ProgressPrinterThread(stopFlag)
  396. thread.start()
  397. # Run the module start (inside parent of TROOT)
  398. # - we use the parent as a historical accident - a lot of tests
  399. # use subprocess's cwd argument already
  400. previousDir = os.getcwd()
  401. os.chdir(os.path.dirname(self.troot))
  402. logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.troot))
  403. try:
  404. retcode = self.setup_module.start(self, out, err)
  405. if retcode == None:
  406. retcode = 0
  407. except Exception:
  408. retcode = 1
  409. st = traceback.format_exc()
  410. st = '\n'.join((4 * ' ') + x for x in st.splitlines())
  411. st = "Start exception:\n%s" % st
  412. logging.info(st)
  413. err.write(st + '\n')
  414. os.chdir(previousDir)
  415. # Stop the progress printer
  416. stopFlag.set()
  417. logging.info("Start completed, running %s", self.benchmarker.mode)
  418. return retcode
  419. ############################################################
  420. # End start
  421. ############################################################
  422. ############################################################
  423. # stop(benchmarker)
  424. # Stops the test using it's setup file
  425. ############################################################
  426. def stop(self, out, err):
  427. # Load profile for this installation
  428. profile="%s/bash_profile.sh" % self.directory
  429. if not os.path.exists(profile):
  430. logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
  431. profile="$FWROOT/config/benchmark_profile"
  432. setup_util.replace_environ(config=profile,
  433. command='export TROOT=%s && export IROOT=%s' %
  434. (self.directory, self.install_root))
  435. # Run the module stop (inside parent of TROOT)
  436. # - we use the parent as a historical accident - a lot of tests
  437. # use subprocess's cwd argument already
  438. previousDir = os.getcwd()
  439. os.chdir(os.path.dirname(self.troot))
  440. logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.troot))
  441. try:
  442. retcode = self.setup_module.stop(out, err)
  443. if retcode == None:
  444. retcode = 0
  445. except Exception:
  446. retcode = 1
  447. st = traceback.format_exc()
  448. st = '\n'.join((4 * ' ') + x for x in st.splitlines())
  449. st = "Stop exception:\n%s\n" % st
  450. logging.info(st)
  451. err.write(st + '\n')
  452. os.chdir(previousDir)
  453. # Give processes sent a SIGTERM a moment to shut down gracefully
  454. time.sleep(5)
  455. return retcode
  456. ############################################################
  457. # End stop
  458. ############################################################
  459. ############################################################
  460. # verify_urls
  461. # Verifys each of the URLs for this test. THis will sinply
  462. # curl the URL and check for it's return status.
  463. # For each url, a flag will be set on this object for whether
  464. # or not it passed
  465. # Returns True if all verifications succeeded
  466. ############################################################
  467. def verify_urls(self, out, err):
  468. result = True
  469. def verify_type(test_type):
  470. test = self.runTests[test_type]
  471. out.write(header("VERIFYING %s" % test_type.upper()))
  472. base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)
  473. results = test.verify(base_url)
  474. test.failed = any(result is 'fail' for (result, reason, url) in results)
  475. test.warned = any(result is 'warn' for (result, reason, url) in results)
  476. test.passed = all(result is 'pass' for (result, reason, url) in results)
  477. def output_result(result, reason, url):
  478. out.write(" %s for %s\n" % (result.upper(), url))
  479. print " %s for %s" % (result.upper(), url)
  480. if reason is not None and len(reason) != 0:
  481. for line in reason.splitlines():
  482. out.write(" " + line + '\n')
  483. print " " + line
  484. [output_result(r1,r2,url) for (r1, r2, url) in results]
  485. if test.failed:
  486. self.benchmarker.report_verify_results(self, test_type, 'fail')
  487. elif test.warned:
  488. self.benchmarker.report_verify_results(self, test_type, 'warn')
  489. elif test.passed:
  490. self.benchmarker.report_verify_results(self, test_type, 'pass')
  491. else:
  492. raise Exception("What the hell")
  493. # JSON
  494. if self.runTests[self.JSON]:
  495. out.write(header("VERIFYING JSON (%s)" % self.json_url))
  496. out.flush()
  497. url = self.benchmarker.generate_url(self.json_url, self.port)
  498. output = self.__curl_url(url, self.JSON, out, err)
  499. out.write("VALIDATING JSON ... ")
  500. ret_tuple = self.validateJson(output, out, err)
  501. if ret_tuple[0]:
  502. self.json_url_passed = True
  503. out.write("PASS\n\n")
  504. self.benchmarker.report_verify_results(self, self.JSON, 'pass')
  505. else:
  506. self.json_url_passed = False
  507. out.write("\nFAIL" + ret_tuple[1] + "\n\n")
  508. self.benchmarker.report_verify_results(self, self.JSON, 'fail')
  509. result = False
  510. out.flush()
  511. # DB
  512. if self.runTests[self.DB]:
  513. out.write(header("VERIFYING DB (%s)" % self.db_url))
  514. out.flush()
  515. url = self.benchmarker.generate_url(self.db_url, self.port)
  516. output = self.__curl_url(url, self.DB, out, err)
  517. validate_ret_tuple = self.validateDb(output, out, err)
  518. validate_strict_ret_tuple = self.validateDbStrict(output, out, err)
  519. if validate_ret_tuple[0]:
  520. self.db_url_passed = True
  521. else:
  522. self.db_url_passed = False
  523. if validate_strict_ret_tuple:
  524. self.db_url_warn = False
  525. else:
  526. self.db_url_warn = True
  527. out.write("VALIDATING DB ... ")
  528. if self.db_url_passed:
  529. out.write("PASS")
  530. self.benchmarker.report_verify_results(self, self.DB, 'pass')
  531. if self.db_url_warn:
  532. out.write(" (with warnings) " + validate_strict_ret_tuple[1])
  533. self.benchmarker.report_verify_results(self, self.DB, 'warn')
  534. out.write("\n\n")
  535. else:
  536. self.benchmarker.report_verify_results(self, self.DB, 'fail')
  537. out.write("\nFAIL" + validate_ret_tuple[1])
  538. result = False
  539. out.flush()
  540. # Query
  541. if self.runTests[self.QUERY]:
  542. out.write(header("VERIFYING QUERY (%s)" % self.query_url+"2"))
  543. out.flush()
  544. url = self.benchmarker.generate_url(self.query_url + "2", self.port)
  545. output = self.__curl_url(url, self.QUERY, out, err)
  546. ret_tuple = self.validateQuery(output, out, err)
  547. if ret_tuple[0]:
  548. self.query_url_passed = True
  549. out.write(self.query_url + "2 - PASS\n\n")
  550. else:
  551. self.query_url_passed = False
  552. out.write(self.query_url + "2 - FAIL " + ret_tuple[1] + "\n\n")
  553. out.write("-----------------------------------------------------\n\n")
  554. out.flush()
  555. self.query_url_warn = False
  556. url2 = self.benchmarker.generate_url(self.query_url + "0", self.port)
  557. output2 = self.__curl_url(url2, self.QUERY, out, err)
  558. ret_tuple = self.validateQueryOneOrLess(output2, out, err)
  559. if not ret_tuple[0]:
  560. self.query_url_warn = True
  561. out.write(self.query_url + "0 - WARNING " + ret_tuple[1] + "\n\n")
  562. else:
  563. out.write(self.query_url + "0 - PASS\n\n")
  564. out.write("-----------------------------------------------------\n\n")
  565. out.flush()
  566. url3 = self.benchmarker.generate_url(self.query_url + "foo", self.port)
  567. output3 = self.__curl_url(url3, self.QUERY, out, err)
  568. ret_tuple = self.validateQueryOneOrLess(output3, out, err)
  569. if not ret_tuple[0]:
  570. self.query_url_warn = True
  571. out.write(self.query_url + "foo - WARNING " + ret_tuple[1] + "\n\n")
  572. else:
  573. out.write(self.query_url + "foo - PASS\n\n")
  574. out.write("-----------------------------------------------------\n\n")
  575. out.flush()
  576. url4 = self.benchmarker.generate_url(self.query_url + "501", self.port)
  577. output4 = self.__curl_url(url4, self.QUERY, out, err)
  578. ret_tuple = self.validateQueryFiveHundredOrMore(output4, out, err)
  579. if not ret_tuple[0]:
  580. self.query_url_warn = True
  581. out.write(self.query_url + "501 - WARNING " + ret_tuple[1] + "\n\n")
  582. else:
  583. out.write(self.query_url + "501 - PASS\n\n")
  584. out.write("-----------------------------------------------------\n\n\n")
  585. out.flush()
  586. out.write("VALIDATING QUERY ... ")
  587. if self.query_url_passed:
  588. out.write("PASS")
  589. self.benchmarker.report_verify_results(self, self.QUERY, 'pass')
  590. if self.query_url_warn:
  591. out.write(" (with warnings)")
  592. self.benchmarker.report_verify_results(self, self.QUERY, 'warn')
  593. out.write("\n\n")
  594. else:
  595. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  596. self.benchmarker.report_verify_results(self, self.QUERY, 'fail')
  597. result = False
  598. out.flush()
  599. # Fortune
  600. if self.runTests[self.FORTUNE]:
  601. out.write(header("VERIFYING FORTUNE (%s)" % self.fortune_url))
  602. out.flush()
  603. url = self.benchmarker.generate_url(self.fortune_url, self.port)
  604. output = self.__curl_url(url, self.FORTUNE, out, err)
  605. out.write("VALIDATING FORTUNE ... ")
  606. ret_tuple = self.validateFortune(output, out, err)
  607. if ret_tuple[0]:
  608. self.fortune_url_passed = True
  609. out.write("PASS\n\n")
  610. self.benchmarker.report_verify_results(self, self.FORTUNE, 'pass')
  611. else:
  612. self.fortune_url_passed = False
  613. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  614. self.benchmarker.report_verify_results(self, self.FORTUNE, 'fail')
  615. result = False
  616. out.flush()
  617. # Update
  618. if self.runTests[self.UPDATE]:
  619. out.write(header("VERIFYING UPDATE (%s)" % self.update_url))
  620. out.flush()
  621. url = self.benchmarker.generate_url(self.update_url + "2", self.port)
  622. output = self.__curl_url(url, self.UPDATE, out, err)
  623. out.write("VALIDATING UPDATE ... ")
  624. ret_tuple = self.validateUpdate(output, out, err)
  625. if ret_tuple[0]:
  626. self.update_url_passed = True
  627. out.write("PASS\n\n")
  628. self.benchmarker.report_verify_results(self, self.UPDATE, 'pass')
  629. else:
  630. self.update_url_passed = False
  631. out.write("\nFAIL " + ret_tuple[1] + "\n\n")
  632. self.benchmarker.report_verify_results(self, self.UPDATE, 'fail')
  633. result = False
  634. out.flush()
  635. # plaintext
  636. if self.runTests[self.PLAINTEXT]:
  637. out.write(header("VERIFYING PLAINTEXT (%s)" % self.plaintext_url))
  638. out.flush()
  639. url = self.benchmarker.generate_url(self.plaintext_url, self.port)
  640. output = self.__curl_url(url, self.PLAINTEXT, out, err)
  641. out.write("VALIDATING PLAINTEXT ... ")
  642. ret_tuple = self.validatePlaintext(output, out, err)
  643. if ret_tuple[0]:
  644. self.plaintext_url_passed = True
  645. out.write("PASS\n\n")
  646. self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'pass')
  647. else:
  648. self.plaintext_url_passed = False
  649. out.write("\nFAIL\n\n" + ret_tuple[1] + "\n\n")
  650. self.benchmarker.report_verify_results(self, self.PLAINTEXT, 'fail')
  651. result = False
  652. out.flush()
  653. return result
  654. ############################################################
  655. # End verify_urls
  656. ############################################################
  657. ############################################################
  658. # benchmark
  659. # Runs the benchmark for each type of test that it implements
  660. # JSON/DB/Query.
  661. ############################################################
  662. def benchmark(self, out, err):
  663. def benchmark_type(test_type):
  664. out.write("BENCHMARKING %s ... " % test_type.upper())
  665. test = self.runTests[test_type]
  666. output_file = self.benchmarker.output_file(self.name, test_type)
  667. if not os.path.exists(output_file):
  668. # Open to create the empty file
  669. with open(output_file, 'w'):
  670. pass
  671. if test.passed:
  672. if test.requires_db:
  673. remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
  674. else:
  675. remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)
  676. self.__begin_logging(test_type)
  677. self.__run_benchmark(remote_script, output_file, err)
  678. self.__end_logging()
  679. results = self.__parse_test(test_type)
  680. print "Benchmark results:"
  681. pprint(results)
  682. self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
  683. out.write( "Complete\n" )
  684. out.flush()
  685. # JSON
  686. if self.runTests[self.JSON]:
  687. try:
  688. out.write("BENCHMARKING JSON ... ")
  689. out.flush()
  690. results = None
  691. output_file = self.benchmarker.output_file(self.name, self.JSON)
  692. if not os.path.exists(output_file):
  693. with open(output_file, 'w'):
  694. # Simply opening the file in write mode should create the empty file.
  695. pass
  696. if self.json_url_passed:
  697. remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
  698. self.__begin_logging(self.JSON)
  699. self.__run_benchmark(remote_script, output_file, err)
  700. self.__end_logging()
  701. results = self.__parse_test(self.JSON)
  702. print results
  703. self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
  704. out.write( "Complete\n" )
  705. out.flush()
  706. except AttributeError:
  707. pass
  708. # DB
  709. if self.runTests[self.DB]:
  710. try:
  711. out.write("BENCHMARKING DB ... ")
  712. out.flush()
  713. results = None
  714. output_file = self.benchmarker.output_file(self.name, self.DB)
  715. if not os.path.exists(output_file):
  716. with open(output_file, 'w'):
  717. # Simply opening the file in write mode should create the empty file.
  718. pass
  719. if self.db_url_passed:
  720. self.benchmarker.report_verify_results(self, self.DB, 'pass')
  721. remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
  722. self.__begin_logging(self.DB)
  723. self.__run_benchmark(remote_script, output_file, err)
  724. self.__end_logging()
  725. results = self.__parse_test(self.DB)
  726. self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
  727. out.write( "Complete\n" )
  728. except AttributeError:
  729. pass
  730. # Query
  731. if self.runTests[self.QUERY]:
  732. try:
  733. out.write("BENCHMARKING Query ... ")
  734. out.flush()
  735. results = None
  736. output_file = self.benchmarker.output_file(self.name, self.QUERY)
  737. if not os.path.exists(output_file):
  738. with open(output_file, 'w'):
  739. # Simply opening the file in write mode should create the empty file.
  740. pass
  741. if self.query_url_passed:
  742. remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
  743. self.__begin_logging(self.QUERY)
  744. self.__run_benchmark(remote_script, output_file, err)
  745. self.__end_logging()
  746. results = self.__parse_test(self.QUERY)
  747. self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
  748. out.write( "Complete\n" )
  749. out.flush()
  750. except AttributeError:
  751. pass
  752. # fortune
  753. if self.runTests[self.FORTUNE]:
  754. try:
  755. out.write("BENCHMARKING Fortune ... ")
  756. out.flush()
  757. results = None
  758. output_file = self.benchmarker.output_file(self.name, self.FORTUNE)
  759. if not os.path.exists(output_file):
  760. with open(output_file, 'w'):
  761. # Simply opening the file in write mode should create the empty file.
  762. pass
  763. if self.fortune_url_passed:
  764. remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
  765. self.__begin_logging(self.FORTUNE)
  766. self.__run_benchmark(remote_script, output_file, err)
  767. self.__end_logging()
  768. results = self.__parse_test(self.FORTUNE)
  769. self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
  770. out.write( "Complete\n" )
  771. out.flush()
  772. except AttributeError:
  773. pass
  774. # update
  775. if self.runTests[self.UPDATE]:
  776. try:
  777. out.write("BENCHMARKING Update ... ")
  778. out.flush()
  779. results = None
  780. output_file = self.benchmarker.output_file(self.name, self.UPDATE)
  781. if not os.path.exists(output_file):
  782. with open(output_file, 'w'):
  783. # Simply opening the file in write mode should create the empty file.
  784. pass
  785. if self.update_url_passed:
  786. remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
  787. self.__begin_logging(self.UPDATE)
  788. self.__run_benchmark(remote_script, output_file, err)
  789. self.__end_logging()
  790. results = self.__parse_test(self.UPDATE)
  791. self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
  792. out.write( "Complete\n" )
  793. out.flush()
  794. except AttributeError:
  795. pass
  796. # plaintext
  797. if self.runTests[self.PLAINTEXT]:
  798. try:
  799. out.write("BENCHMARKING Plaintext ... ")
  800. out.flush()
  801. results = None
  802. output_file = self.benchmarker.output_file(self.name, self.PLAINTEXT)
  803. if not os.path.exists(output_file):
  804. with open(output_file, 'w'):
  805. # Simply opening the file in write mode should create the empty file.
  806. pass
  807. if self.plaintext_url_passed:
  808. remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk", intervals=[256,1024,4096,16384], pipeline="16")
  809. self.__begin_logging(self.PLAINTEXT)
  810. self.__run_benchmark(remote_script, output_file, err)
  811. self.__end_logging()
  812. results = self.__parse_test(self.PLAINTEXT)
  813. self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  814. out.write( "Complete\n" )
  815. out.flush()
  816. except AttributeError:
  817. traceback.print_exc()
  818. pass
  819. ############################################################
  820. # End benchmark
  821. ############################################################
  822. ############################################################
  823. # parse_all
  824. # Method meant to be run for a given timestamp
  825. ############################################################
  826. def parse_all(self):
  827. # JSON
  828. if os.path.exists(self.benchmarker.get_output_file(self.name, self.JSON)):
  829. results = self.__parse_test(self.JSON)
  830. self.benchmarker.report_benchmark_results(framework=self, test=self.JSON, results=results['results'])
  831. # DB
  832. if os.path.exists(self.benchmarker.get_output_file(self.name, self.DB)):
  833. results = self.__parse_test(self.DB)
  834. self.benchmarker.report_benchmark_results(framework=self, test=self.DB, results=results['results'])
  835. # Query
  836. if os.path.exists(self.benchmarker.get_output_file(self.name, self.QUERY)):
  837. results = self.__parse_test(self.QUERY)
  838. self.benchmarker.report_benchmark_results(framework=self, test=self.QUERY, results=results['results'])
  839. # Fortune
  840. if os.path.exists(self.benchmarker.get_output_file(self.name, self.FORTUNE)):
  841. results = self.__parse_test(self.FORTUNE)
  842. self.benchmarker.report_benchmark_results(framework=self, test=self.FORTUNE, results=results['results'])
  843. # Update
  844. if os.path.exists(self.benchmarker.get_output_file(self.name, self.UPDATE)):
  845. results = self.__parse_test(self.UPDATE)
  846. self.benchmarker.report_benchmark_results(framework=self, test=self.UPDATE, results=results['results'])
  847. # Plaintext
  848. if os.path.exists(self.benchmarker.get_output_file(self.name, self.PLAINTEXT)):
  849. results = self.__parse_test(self.PLAINTEXT)
  850. self.benchmarker.report_benchmark_results(framework=self, test=self.PLAINTEXT, results=results['results'])
  851. ############################################################
  852. # End parse_all
  853. ############################################################
  854. ############################################################
  855. # __parse_test(test_type)
  856. ############################################################
  857. def __parse_test(self, test_type):
  858. try:
  859. results = dict()
  860. results['results'] = []
  861. stats = []
  862. if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
  863. with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
  864. is_warmup = True
  865. rawData = None
  866. for line in raw_data:
  867. if "Queries:" in line or "Concurrency:" in line:
  868. is_warmup = False
  869. rawData = None
  870. continue
  871. if "Warmup" in line or "Primer" in line:
  872. is_warmup = True
  873. continue
  874. if not is_warmup:
  875. if rawData == None:
  876. rawData = dict()
  877. results['results'].append(rawData)
  878. #if "Requests/sec:" in line:
  879. # m = re.search("Requests/sec:\s+([0-9]+)", line)
  880. # rawData['reportedResults'] = m.group(1)
  881. # search for weighttp data such as succeeded and failed.
  882. if "Latency" in line:
  883. m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
  884. if len(m) == 4:
  885. rawData['latencyAvg'] = m[0]
  886. rawData['latencyStdev'] = m[1]
  887. rawData['latencyMax'] = m[2]
  888. # rawData['latencyStdevPercent'] = m[3]
  889. #if "Req/Sec" in line:
  890. # m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
  891. # if len(m) == 4:
  892. # rawData['requestsAvg'] = m[0]
  893. # rawData['requestsStdev'] = m[1]
  894. # rawData['requestsMax'] = m[2]
  895. # rawData['requestsStdevPercent'] = m[3]
  896. #if "requests in" in line:
  897. # m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
  898. # if m != None:
  899. # # parse out the raw time, which may be in minutes or seconds
  900. # raw_time = m.group(1)
  901. # if "ms" in raw_time:
  902. # rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
  903. # elif "s" in raw_time:
  904. # rawData['total_time'] = float(raw_time[:len(raw_time)-1])
  905. # elif "m" in raw_time:
  906. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
  907. # elif "h" in raw_time:
  908. # rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
  909. if "requests in" in line:
  910. m = re.search("([0-9]+) requests in", line)
  911. if m != None:
  912. rawData['totalRequests'] = int(m.group(1))
  913. if "Socket errors" in line:
  914. if "connect" in line:
  915. m = re.search("connect ([0-9]+)", line)
  916. rawData['connect'] = int(m.group(1))
  917. if "read" in line:
  918. m = re.search("read ([0-9]+)", line)
  919. rawData['read'] = int(m.group(1))
  920. if "write" in line:
  921. m = re.search("write ([0-9]+)", line)
  922. rawData['write'] = int(m.group(1))
  923. if "timeout" in line:
  924. m = re.search("timeout ([0-9]+)", line)
  925. rawData['timeout'] = int(m.group(1))
  926. if "Non-2xx" in line:
  927. m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
  928. if m != None:
  929. rawData['5xx'] = int(m.group(1))
  930. if "STARTTIME" in line:
  931. m = re.search("[0-9]+", line)
  932. rawData["startTime"] = int(m.group(0))
  933. if "ENDTIME" in line:
  934. m = re.search("[0-9]+", line)
  935. rawData["endTime"] = int(m.group(0))
  936. test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
  937. # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
  938. stats.append(test_stats)
  939. with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
  940. json.dump(stats, stats_file)
  941. return results
  942. except IOError:
  943. return None
  944. ############################################################
  945. # End benchmark
  946. ############################################################
  947. ##########################################################################################
  948. # Private Methods
  949. ##########################################################################################
  950. ############################################################
  951. # __run_benchmark(script, output_file)
  952. # Runs a single benchmark using the script which is a bash
  953. # template that uses weighttp to run the test. All the results
  954. # outputed to the output_file.
  955. ############################################################
  956. def __run_benchmark(self, script, output_file, err):
  957. with open(output_file, 'w') as raw_file:
  958. p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
  959. p.communicate(script)
  960. err.flush()
  961. ############################################################
  962. # End __run_benchmark
  963. ############################################################
  964. ############################################################
  965. # __generate_concurrency_script(url, port)
  966. # Generates the string containing the bash script that will
  967. # be run on the client to benchmark a single test. This
  968. # specifically works for the variable concurrency tests (JSON
  969. # and DB)
  970. ############################################################
  971. def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk", intervals=[], pipeline=""):
  972. if len(intervals) == 0:
  973. intervals = self.benchmarker.concurrency_levels
  974. headers = self.__get_request_headers(accept_header)
  975. return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
  976. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  977. interval=" ".join("{}".format(item) for item in intervals),
  978. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
  979. pipeline=pipeline)
  980. ############################################################
  981. # End __generate_concurrency_script
  982. ############################################################
  983. ############################################################
  984. # __generate_query_script(url, port)
  985. # Generates the string containing the bash script that will
  986. # be run on the client to benchmark a single test. This
  987. # specifically works for the variable query tests (Query)
  988. ############################################################
  989. def __generate_query_script(self, url, port, accept_header):
  990. headers = self.__get_request_headers(accept_header)
  991. return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
  992. max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
  993. interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
  994. server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  995. ############################################################
  996. # End __generate_query_script
  997. ############################################################
  998. ############################################################
  999. # __get_request_headers(accept_header)
  1000. # Generates the complete HTTP header string
  1001. ############################################################
  1002. def __get_request_headers(self, accept_header):
  1003. return self.headers_template.format(accept=accept_header)
  1004. ############################################################
  1005. # End __format_request_headers
  1006. ############################################################
  1007. ############################################################
  1008. # __curl_url
  1009. # Dump HTTP response and headers. Throw exception if there
  1010. # is an HTTP error.
  1011. ############################################################
  1012. def __curl_url(self, url, testType, out, err):
  1013. output = None
  1014. try:
  1015. # Use -m 15 to make curl stop trying after 15sec.
  1016. # Use -i to output response with headers.
  1017. # Don't use -f so that the HTTP response code is ignored.
  1018. # Use --stderr - to redirect stderr to stdout so we get
  1019. # error output for sure in stdout.
  1020. # Use -sS to hide progress bar, but show errors.
  1021. subprocess.check_call(["curl", "-m", "15", "-i", "-sS", url], stderr=err, stdout=out)
  1022. # HTTP output may not end in a newline, so add that here.
  1023. out.write( "\n\n" )
  1024. out.flush()
  1025. err.flush()
  1026. # We need to get the respond body from the curl and return it.
  1027. p = subprocess.Popen(["curl", "-m", "15", "-s", url], stdout=subprocess.PIPE)
  1028. output = p.communicate()
  1029. except:
  1030. pass
  1031. if output:
  1032. # We have the response body - return it
  1033. return output[0]
  1034. ##############################################################
  1035. # End __curl_url
  1036. ##############################################################
  1037. def requires_database(self):
  1038. '''Returns True/False if this test requires a database'''
  1039. return any(tobj.requires_db for (ttype,tobj) in self.runTests.iteritems())
  1040. ############################################################
  1041. # __begin_logging
  1042. # Starts a thread to monitor the resource usage, to be synced with the client's time
  1043. # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  1044. ############################################################
  1045. def __begin_logging(self, test_name):
  1046. output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_name))
  1047. dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
  1048. --raw --socket --tcp --udp --unix --vm --disk-util \
  1049. --rpc --rpcd --output {output_file}".format(output_file=output_file)
  1050. cmd = shlex.split(dstat_string)
  1051. dev_null = open(os.devnull, "w")
  1052. self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
  1053. ##############################################################
  1054. # End __begin_logging
  1055. ##############################################################
  1056. ##############################################################
  1057. # Begin __end_logging
  1058. # Stops the logger thread and blocks until shutdown is complete.
  1059. ##############################################################
  1060. def __end_logging(self):
  1061. self.subprocess_handle.terminate()
  1062. self.subprocess_handle.communicate()
  1063. ##############################################################
  1064. # End __end_logging
  1065. ##############################################################
  1066. ##############################################################
  1067. # Begin __parse_stats
  1068. # For each test type, process all the statistics, and return a multi-layered dictionary
  1069. # that has a structure as follows:
  1070. # (timestamp)
  1071. # | (main header) - group that the stat is in
  1072. # | | (sub header) - title of the stat
  1073. # | | | (stat) - the stat itself, usually a floating point number
  1074. ##############################################################
  1075. def __parse_stats(self, test_type, start_time, end_time, interval):
  1076. stats_dict = dict()
  1077. stats_file = self.benchmarker.stats_file(self.name, test_type)
  1078. with open(stats_file) as stats:
  1079. while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header
  1080. pass
  1081. stats_reader = csv.reader(stats)
  1082. main_header = stats_reader.next()
  1083. sub_header = stats_reader.next()
  1084. time_row = sub_header.index("epoch")
  1085. int_counter = 0
  1086. for row in stats_reader:
  1087. time = float(row[time_row])
  1088. int_counter+=1
  1089. if time < start_time:
  1090. continue
  1091. elif time > end_time:
  1092. return stats_dict
  1093. if int_counter % interval != 0:
  1094. continue
  1095. row_dict = dict()
  1096. for nextheader in main_header:
  1097. if nextheader != "":
  1098. row_dict[nextheader] = dict()
  1099. header = ""
  1100. for item_num, column in enumerate(row):
  1101. if(len(main_header[item_num]) != 0):
  1102. header = main_header[item_num]
  1103. row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json
  1104. stats_dict[time] = row_dict
  1105. return stats_dict
  1106. ##############################################################
  1107. # End __parse_stats
  1108. ##############################################################
  1109. def __getattr__(self, name):
  1110. """For backwards compatibility, we used to pass benchmarker
  1111. as the argument to the setup.py files"""
  1112. try:
  1113. x = getattr(self.benchmarker, name)
  1114. except AttributeError:
  1115. print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
  1116. print "This is probably a bug"
  1117. raise
  1118. return x
  1119. ##############################################################
  1120. # Begin __calculate_average_stats
  1121. # We have a large amount of raw data for the statistics that
  1122. # may be useful for the stats nerds, but most people care about
  1123. # a couple of numbers. For now, we're only going to supply:
  1124. # * Average CPU
  1125. # * Average Memory
  1126. # * Total network use
  1127. # * Total disk use
  1128. # More may be added in the future. If they are, please update
  1129. # the above list.
  1130. # Note: raw_stats is directly from the __parse_stats method.
  1131. # Recall that this consists of a dictionary of timestamps,
  1132. # each of which contain a dictionary of stat categories which
  1133. # contain a dictionary of stats
  1134. ##############################################################
  1135. def __calculate_average_stats(self, raw_stats):
  1136. raw_stat_collection = dict()
  1137. for timestamp, time_dict in raw_stats.items():
  1138. for main_header, sub_headers in time_dict.items():
  1139. item_to_append = None
  1140. if 'cpu' in main_header:
  1141. # We want to take the idl stat and subtract it from 100
  1142. # to get the time that the CPU is NOT idle.
  1143. item_to_append = sub_headers['idl'] - 100.0
  1144. elif main_header == 'memory usage':
  1145. item_to_append = sub_headers['used']
  1146. elif 'net' in main_header:
  1147. # Network stats have two parts - recieve and send. We'll use a tuple of
  1148. # style (recieve, send)
  1149. item_to_append = (sub_headers['recv'], sub_headers['send'])
  1150. elif 'dsk' or 'io' in main_header:
  1151. # Similar for network, except our tuple looks like (read, write)
  1152. item_to_append = (sub_headers['read'], sub_headers['writ'])
  1153. if item_to_append is not None:
  1154. if main_header not in raw_stat_collection:
  1155. raw_stat_collection[main_header] = list()
  1156. raw_stat_collection[main_header].append(item_to_append)
  1157. # Simple function to determine human readable size
  1158. # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
  1159. def sizeof_fmt(num):
  1160. # We'll assume that any number we get is convertable to a float, just in case
  1161. num = float(num)
  1162. for x in ['bytes','KB','MB','GB']:
  1163. if num < 1024.0 and num > -1024.0:
  1164. return "%3.1f%s" % (num, x)
  1165. num /= 1024.0
  1166. return "%3.1f%s" % (num, 'TB')
  1167. # Now we have our raw stats in a readable format - we need to format it for display
  1168. # We need a floating point sum, so the built in sum doesn't cut it
  1169. display_stat_collection = dict()
  1170. for header, values in raw_stat_collection.items():
  1171. display_stat = None
  1172. if 'cpu' in header:
  1173. display_stat = sizeof_fmt(math.fsum(values) / len(values))
  1174. elif main_header == 'memory usage':
  1175. display_stat = sizeof_fmt(math.fsum(values) / len(values))
  1176. elif 'net' in main_header:
  1177. receive, send = zip(*values) # unzip
  1178. display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
  1179. else: # if 'dsk' or 'io' in header:
  1180. read, write = zip(*values) # unzip
  1181. display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
  1182. display_stat_collection[header] = display_stat
  1183. return display_stat
  1184. ###########################################################################################
  1185. # End __calculate_average_stats
  1186. #########################################################################################
  1187. ##########################################################################################
  1188. # Constructor
  1189. ##########################################################################################
  1190. def __init__(self, name, directory, benchmarker, runTests, args):
  1191. self.name = name
  1192. self.directory = directory
  1193. self.benchmarker = benchmarker
  1194. self.runTests = runTests
  1195. self.fwroot = benchmarker.fwroot
  1196. # setup logging
  1197. logging.basicConfig(stream=sys.stderr, level=logging.INFO)
  1198. self.install_root="%s/%s" % (self.fwroot, "installs")
  1199. if benchmarker.install_strategy is 'pertest':
  1200. self.install_root="%s/pertest/%s" % (self.install_root, name)
  1201. # Used in setup.py scripts for consistency with
  1202. # the bash environment variables
  1203. self.troot = self.directory
  1204. self.iroot = self.install_root
  1205. self.__dict__.update(args)
  1206. # ensure directory has __init__.py file so that we can use it as a Python package
  1207. if not os.path.exists(os.path.join(directory, "__init__.py")):
  1208. logging.warning("Please add an empty __init__.py file to directory %s", directory)
  1209. open(os.path.join(directory, "__init__.py"), 'w').close()
  1210. # Import the module (TODO - consider using sys.meta_path)
  1211. # Note: You can see the log output if you really want to, but it's a *ton*
  1212. dir_rel_to_fwroot = os.path.relpath(os.path.dirname(directory), self.fwroot)
  1213. if dir_rel_to_fwroot != ".":
  1214. sys.path.append("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
  1215. logging.log(0, "Adding %s to import %s.%s", dir_rel_to_fwroot, os.path.basename(directory), self.setup_file)
  1216. self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  1217. sys.path.remove("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
  1218. else:
  1219. logging.log(0, "Importing %s.%s", directory, self.setup_file)
  1220. self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  1221. ############################################################
  1222. # End __init__
  1223. ############################################################
  1224. ############################################################
  1225. # End FrameworkTest
  1226. ############################################################
  1227. ##########################################################################################
  1228. # Static methods
  1229. ##########################################################################################
  1230. ##############################################################
  1231. # parse_config(config, directory, benchmarker)
  1232. # parses a config file and returns a list of FrameworkTest
  1233. # objects based on that config file.
  1234. ##############################################################
  1235. def parse_config(config, directory, benchmarker):
  1236. tests = []
  1237. # The config object can specify multiple tests
  1238. # Loop over them and parse each into a FrameworkTest
  1239. for test in config['tests']:
  1240. for test_name, test_keys in test.iteritems():
  1241. # Prefix all test names with framework except 'default' test
  1242. if test_name == 'default':
  1243. test_name = config['framework']
  1244. else:
  1245. test_name = "%s-%s" % (config['framework'], test_name)
  1246. # Ensure FrameworkTest.framework is available
  1247. if not test_keys['framework']:
  1248. test_keys['framework'] = config['framework']
  1249. #if test_keys['framework'].lower() != config['framework'].lower():
  1250. # print Exception("benchmark_config for test %s is invalid - test framework '%s' must match benchmark_config framework '%s'" %
  1251. # (test_name, test_keys['framework'], config['framework']))
  1252. # Confirm required keys are present
  1253. # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
  1254. required = ['language','webserver','classification','database','approach','orm','framework','os','database_os']
  1255. if not all (key in test_keys for key in required):
  1256. raise Exception("benchmark_config for test %s is invalid - missing required keys" % test_name)
  1257. # Map test type to a parsed FrameworkTestType object
  1258. runTests = dict()
  1259. for type_name, type_obj in benchmarker.types.iteritems():
  1260. try:
  1261. runTests[type_name] = type_obj.copy().parse(test_keys)
  1262. except AttributeError as ae:
  1263. # This is quite common - most tests don't support all types
  1264. # Quitely log it and move on
  1265. logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
  1266. pass
  1267. # By passing the entire set of keys, each FrameworkTest will have a member for each key
  1268. tests.append(FrameworkTest(test_name, directory, benchmarker, runTests, test_keys))
  1269. return tests
  1270. ##############################################################
  1271. # End parse_config
  1272. ##############################################################