# framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *

import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math

from collections import OrderedDict
from threading import Thread
from threading import Event
from utils import header


class FrameworkTest:
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
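  # Illustrative example, not part of the original file: formatting the
  # template with a hypothetical accept header produces the flags handed
  # to wrk, e.g.
  #   headers_template.format(accept='Accept: application/json')
  #   # => "-H 'Host: localhost' -H 'Accept: application/json' -H 'Connection: keep-alive'"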

  # Used for test types that require no pipelining or query string params.
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
  """

  # Used for test types that require pipelining.
  pipeline_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
  """

  # Used for test types that require a database -
  # These tests run at a static concurrency level and vary the size of
  # the query sent with each request
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
  """

  ############################################################
  # start(benchmarker)
  # Start the test using its setup file
  ############################################################
  def start(self, out, err):
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile = "$FWROOT/config/benchmark_profile"

    # Set up variables for TROOT and IROOT
    setup_util.replace_environ(config=profile,
      command='export TROOT=%s && export IROOT=%s && export DBHOST=%s && export MAX_THREADS=%s' %
      (self.directory, self.install_root, self.database_host, self.benchmarker.threads))

    # Because start can take so long, periodically print a status line
    # so the user knows we are still working
    class ProgressPrinterThread(Thread):
      def __init__(self, event):
        Thread.__init__(self)
        self.stopped = event

      def run(self):
        while not self.stopped.wait(20):
          sys.stderr.write("Waiting for start to return...\n")

    stopFlag = Event()
    thread = ProgressPrinterThread(stopFlag)
    thread.start()

    # Run the module start (inside the parent of TROOT)
    #  - we use the parent as a historical accident; a lot of tests
    #    already use subprocess's cwd argument
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.directory))

    # Run the start script for the test as the "testrunner" user.
    # This requires superuser privileges, so `sudo` is necessary.
    #   -u [username]  The user to run as
    #   -E             Preserves the current environment variables
    # Note: check_call is a blocking call, so any processes started by the
    # framework that need to keep running (i.e. the server itself) must be
    # launched in the background by the setup script.
    try:
      retcode = subprocess.check_call('sudo -u %s -E ./%s.sh' %
        (self.benchmarker.runner_user, self.setup_file),
        cwd=self.directory, shell=True, stderr=err, stdout=out)
      if retcode is None:
        retcode = 0
    except Exception:
      retcode = 1
      st = traceback.format_exc()
      st = '\n'.join((4 * ' ') + x for x in st.splitlines())
      st = "Start exception:\n%s" % st
      logging.info(st)
      err.write(st + '\n')
    os.chdir(previousDir)

    # Stop the progress printer
    stopFlag.set()

    logging.info("Called setup.py start")
    return retcode
  ############################################################
  # End start
  ############################################################
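
  # For illustration only (runner user and setup file name are hypothetical):
  # the check_call above ends up executing a command of the form
  #   sudo -u testrunner -E ./setup.sh
  # with cwd set to the test directory, so the script inherits the TROOT,
  # IROOT, DBHOST and MAX_THREADS variables exported above.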

  ############################################################
  # stop(benchmarker)
  # Stops the test using its setup file
  ############################################################
  def stop(self, out, err):
    # Load profile for this installation
    profile = "%s/bash_profile.sh" % self.directory
    if not os.path.exists(profile):
      logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
      profile = "$FWROOT/config/benchmark_profile"
    setup_util.replace_environ(config=profile,
      command='export TROOT=%s && export IROOT=%s' %
      (self.directory, self.install_root))

    # Run the module stop (inside the parent of TROOT)
    #  - we use the parent as a historical accident; a lot of tests
    #    already use subprocess's cwd argument
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.directory))

    # Meganuke: kill every process owned by the runner user
    try:
      subprocess.check_call('sudo killall -s 9 -u %s' % self.benchmarker.runner_user, shell=True, stderr=err, stdout=out)
      retcode = 0
    except Exception:
      retcode = 1
    os.chdir(previousDir)

    # Give the processes we just killed (SIGKILL) a moment to die and
    # release their ports before the next test starts
    time.sleep(5)
    return retcode
  ############################################################
  # End stop
  ############################################################

  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check for its return status.
  # For each url, a flag will be set on this object for whether
  # or not it passed.
  # Returns True if all verifications succeeded
  ############################################################
  def verify_urls(self, out, err):
    result = True

    def verify_type(test_type):
      test = self.runTests[test_type]
      test.setup_out_err(out, err)
      out.write(header("VERIFYING %s" % test_type.upper()))

      base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)

      try:
        results = test.verify(base_url)
      except Exception as e:
        results = [('fail',"""Caused Exception in TFB
          This almost certainly means your return value is incorrect,
          but also that you have found a bug. Please submit an issue
          including this message: %s\n%s""" % (e, traceback.format_exc()),
          base_url)]
        logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
        traceback.format_exc()

      test.failed = any(result == 'fail' for (result, reason, url) in results)
      test.warned = any(result == 'warn' for (result, reason, url) in results)
      test.passed = all(result == 'pass' for (result, reason, url) in results)

      def output_result(result, reason, url):
        out.write(" %s for %s\n" % (result.upper(), url))
        print " %s for %s" % (result.upper(), url)
        if reason is not None and len(reason) != 0:
          for line in reason.splitlines():
            out.write(" " + line + '\n')
            print " " + line

      [output_result(r1, r2, url) for (r1, r2, url) in results]

      if test.failed:
        self.benchmarker.report_verify_results(self, test_type, 'fail')
      elif test.warned:
        self.benchmarker.report_verify_results(self, test_type, 'warn')
      elif test.passed:
        self.benchmarker.report_verify_results(self, test_type, 'pass')
      else:
        raise Exception("Unknown error - test did not pass, warn, or fail")

    result = True
    for test_type in self.runTests:
      verify_type(test_type)
      if self.runTests[test_type].failed:
        result = False

    return result
  ############################################################
  # End verify_urls
  ############################################################
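
  # Note, illustrative and not part of the original file: each test type's
  # verify(base_url) is assumed to return a list of (result, reason, url)
  # tuples where result is one of 'pass', 'warn' or 'fail', for example
  #   [('pass', '', '/json'), ('warn', 'Missing Content-Length header', '/json')]
  # which is exactly the shape the failed/warned/passed checks above rely on.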

  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):

    def benchmark_type(test_type):
      out.write("BENCHMARKING %s ... " % test_type.upper())

      test = self.runTests[test_type]
      test.setup_out_err(out, err)
      output_file = self.benchmarker.output_file(self.name, test_type)
      if not os.path.exists(output_file):
        # Open to create the empty file
        with open(output_file, 'w'):
          pass

      if not test.failed:
        if test_type == 'plaintext': # One special case
          remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
        elif test_type == 'query' or test_type == 'update':
          remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
        else:
          remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)

        # Begin resource usage metrics collection
        self.__begin_logging(test_type)

        # Run the benchmark
        with open(output_file, 'w') as raw_file:
          p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
          p.communicate(remote_script)
          err.flush()

        # End resource usage metrics collection
        self.__end_logging()

      results = self.__parse_test(test_type)
      print "Benchmark results:"
      pprint(results)

      self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
      out.write("Complete\n")
      out.flush()

    for test_type in self.runTests:
      benchmark_type(test_type)
  ############################################################
  # End benchmark
  ############################################################

  ############################################################
  # parse_all
  # Method meant to be run for a given timestamp
  ############################################################
  def parse_all(self):
    for test_type in self.runTests:
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        results = self.__parse_test(test_type)
        self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])

  ############################################################
  # __parse_test(test_type)
  ############################################################
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      stats = []

      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:

            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue

            if not is_warmup:
              if rawData is None:
                rawData = dict()
                results['results'].append(rawData)

              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)

              # search for weighttp data such as succeeded and failed.
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
              #    rawData['latencyStdevPercent'] = m[3]

              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]

              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m is not None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m is not None:
                  rawData['5xx'] = int(m.group(1))
              if "STARTTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["startTime"] = int(m.group(0))
              if "ENDTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["endTime"] = int(m.group(0))
                test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
                # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
                stats.append(test_stats)

      with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
        json.dump(stats, stats_file, indent=2)

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
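
  # Illustrative example (numbers invented): given a chunk of the captured
  # benchmark output (wrk results plus the STARTTIME/ENDTIME echoes from the
  # generated script) such as
  #       Latency     2.31ms    4.56ms   78.90ms   91.23%
  #     123456 requests in 15.00s, 17.65MB read
  #     Socket errors: connect 0, read 12, write 0, timeout 3
  #     Non-2xx or 3xx responses: 7
  #     STARTTIME 1398810000
  #     ENDTIME 1398810015
  # __parse_test appends a rawData entry roughly like
  #   {'latencyAvg': '2.31ms', 'latencyStdev': '4.56ms', 'latencyMax': '78.90ms',
  #    'totalRequests': 123456, 'connect': 0, 'read': 12, 'write': 0, 'timeout': 3,
  #    '5xx': 7, 'startTime': 1398810000, 'endTime': 1398810015}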

  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
    return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)

  ############################################################
  # __generate_pipeline_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single pipeline test.
  ############################################################
  def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
    return self.pipeline_template.format(max_concurrency=16384,
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in [256,1024,4096,16384]),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=16)
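
  # Note (assumptions about the client setup): the pipeline test always uses
  # the fixed concurrency ladder 256/1024/4096/16384 and passes 16 through to
  # ~/pipeline.lua, i.e. wrk is asked to pipeline 16 requests per batch; the
  # Lua script is assumed to already exist in the client user's home directory.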

  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
  def __generate_query_script(self, url, port, accept_header):
    headers = self.headers_template.format(accept=accept_header)
    return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)

  ############################################################
  # Returns True if any test type in this framework test uses a DB
  ############################################################
  def requires_database(self):
    '''Returns True/False if this test requires a database'''
    return any(tobj.requires_db for (ttype, tobj) in self.runTests.iteritems())

  ############################################################
  # __begin_logging
  # Starts a dstat process to monitor resource usage, to be synced with the client's time
  # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  ############################################################
  def __begin_logging(self, test_type):
    output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_type))
    dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
      --raw --socket --tcp --udp --unix --vm --disk-util \
      --rpc --rpcd --output {output_file}".format(output_file=output_file)
    cmd = shlex.split(dstat_string)
    dev_null = open(os.devnull, "w")
    self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)

  ##############################################################
  # Begin __end_logging
  # Stops the dstat process and blocks until shutdown is complete.
  ##############################################################
  def __end_logging(self):
    self.subprocess_handle.terminate()
    self.subprocess_handle.communicate()

  ##############################################################
  # Begin __parse_stats
  # For each test type, process all the statistics, and return a multi-layered dictionary
  # that has a structure as follows:
  # (timestamp)
  # | (main header) - group that the stat is in
  # | | (sub header) - title of the stat
  # | | | (stat) - the stat itself, usually a floating point number
  ##############################################################
  def __parse_stats(self, test_type, start_time, end_time, interval):
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
      while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header
        pass
      stats_reader = csv.reader(stats)
      main_header = stats_reader.next()
      sub_header = stats_reader.next()
      time_row = sub_header.index("epoch")
      int_counter = 0
      for row in stats_reader:
        time = float(row[time_row])
        int_counter += 1
        if time < start_time:
          continue
        elif time > end_time:
          return stats_dict
        if int_counter % interval != 0:
          continue
        row_dict = dict()
        for nextheader in main_header:
          if nextheader != "":
            row_dict[nextheader] = dict()
        header = ""
        for item_num, column in enumerate(row):
          if(len(main_header[item_num]) != 0):
            header = main_header[item_num]
          # all the stats are numbers, so we want to make sure that they stay that way in json
          row_dict[header][sub_header[item_num]] = float(column)
        stats_dict[time] = row_dict
    return stats_dict
  ##############################################################
  # End __parse_stats
  ##############################################################
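
  # Illustrative example (column names depend on the dstat version; numbers
  # invented): one entry of the returned stats_dict looks roughly like
  #   stats_dict[1398810002.0] = {
  #     'total cpu usage': {'usr': 5.0, 'sys': 2.0, 'idl': 92.0, 'wai': 1.0},
  #     'memory usage':    {'used': 1.2e9, 'buff': 3.1e8, 'cach': 2.4e9, 'free': 4.5e9},
  #     'net/total':       {'recv': 123456.0, 'send': 654321.0},
  #     'dsk/total':       {'read': 0.0, 'writ': 81920.0},
  #     'epoch':           {'epoch': 1398810002.0},
  #   }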

  def __getattr__(self, name):
    """For backwards compatibility, we used to pass benchmarker
    as the argument to the setup.py files"""
    try:
      x = getattr(self.benchmarker, name)
    except AttributeError:
      print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
      print "This is probably a bug"
      raise
    return x

  ##############################################################
  # Begin __calculate_average_stats
  # We have a large amount of raw data for the statistics that
  # may be useful for the stats nerds, but most people care about
  # a couple of numbers. For now, we're only going to supply:
  #   * Average CPU
  #   * Average Memory
  #   * Total network use
  #   * Total disk use
  # More may be added in the future. If they are, please update
  # the above list.
  # Note: raw_stats comes directly from the __parse_stats method.
  # Recall that this consists of a dictionary of timestamps,
  # each of which contains a dictionary of stat categories which
  # contain a dictionary of stats
  ##############################################################
  def __calculate_average_stats(self, raw_stats):
    raw_stat_collection = dict()

    for timestamp, time_dict in raw_stats.items():
      for main_header, sub_headers in time_dict.items():
        item_to_append = None
        if 'cpu' in main_header:
          # We want to take the idl stat and subtract it from 100
          # to get the time that the CPU is NOT idle.
          item_to_append = 100.0 - sub_headers['idl']
        elif main_header == 'memory usage':
          item_to_append = sub_headers['used']
        elif 'net' in main_header:
          # Network stats have two parts - receive and send. We'll use a tuple of
          # style (receive, send)
          item_to_append = (sub_headers['recv'], sub_headers['send'])
        elif 'dsk' in main_header or 'io' in main_header:
          # Similar to network, except our tuple looks like (read, write)
          item_to_append = (sub_headers['read'], sub_headers['writ'])

        if item_to_append is not None:
          if main_header not in raw_stat_collection:
            raw_stat_collection[main_header] = list()
          raw_stat_collection[main_header].append(item_to_append)

    # Simple function to determine human readable size
    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    def sizeof_fmt(num):
      # We'll assume that any number we get is convertible to a float, just in case
      num = float(num)
      for x in ['bytes','KB','MB','GB']:
        if num < 1024.0 and num > -1024.0:
          return "%3.1f%s" % (num, x)
        num /= 1024.0
      return "%3.1f%s" % (num, 'TB')
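
    # For example: sizeof_fmt(123) -> '123.0bytes', sizeof_fmt(1536) -> '1.5KB',
    # sizeof_fmt(3 * 1024 ** 3) -> '3.0GB'.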

    # Now we have our raw stats in a readable format - we need to format it for display
    # We need a floating point sum, so the built-in sum doesn't cut it
    display_stat_collection = dict()
    for header, values in raw_stat_collection.items():
      display_stat = None
      if 'cpu' in header:
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif header == 'memory usage':
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif 'net' in header:
        receive, send = zip(*values) # unzip
        display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
      else: # 'dsk' or 'io' in header
        read, write = zip(*values) # unzip
        display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
      display_stat_collection[header] = display_stat
    return display_stat_collection
  ###########################################################################################
  # End __calculate_average_stats
  #########################################################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
      self.install_root = "%s/pertest/%s" % (self.install_root, name)

    # Used in setup.py scripts for consistency with
    # the bash environment variables
    self.troot = self.directory
    self.iroot = self.install_root

    self.__dict__.update(args)

    # ensure the directory has an __init__.py file so that we can use it as a Python package
    if not os.path.exists(os.path.join(directory, "__init__.py")):
      logging.warning("Please add an empty __init__.py file to directory %s", directory)
      open(os.path.join(directory, "__init__.py"), 'w').close()

    # Import the module (TODO - consider using sys.meta_path)
    # Note: You can see the log output if you really want to, but it's a *ton*
    dir_rel_to_fwroot = os.path.relpath(os.path.dirname(directory), self.fwroot)
    if dir_rel_to_fwroot != ".":
      sys.path.append("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
      logging.log(0, "Adding %s to import %s.%s", dir_rel_to_fwroot, os.path.basename(directory), self.setup_file)
      #self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
      sys.path.remove("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
    else:
      logging.log(0, "Importing %s.%s", directory, self.setup_file)
      #self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
  ############################################################
  # End __init__
  ############################################################

############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # This sort ordering is set up specifically to return the length
  # of the test name. There were SO many problems involved with
  # 'plaintext' being run first (rather, just not last) that we
  # needed to ensure that it was run last for every framework.
  def testOrder(type_name):
    return len(type_name)
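
  # For example, testOrder('json') == 4 and testOrder('plaintext') == 9, so
  # sorting the implemented type names by testOrder pushes 'plaintext' to the
  # end of the run order.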

  # The config object can specify multiple tests
  # Loop over them and parse each into a FrameworkTest
  for test in config['tests']:
    for test_name, test_keys in test.iteritems():
      # Prefix all test names with the framework name, except the 'default' test
      if test_name == 'default':
        test_name = config['framework']
      else:
        test_name = "%s-%s" % (config['framework'], test_name)

      # Ensure FrameworkTest.framework is available
      if not test_keys['framework']:
        test_keys['framework'] = config['framework']
      #if test_keys['framework'].lower() != config['framework'].lower():
      #  print Exception("benchmark_config for test %s is invalid - test framework '%s' must match benchmark_config framework '%s'" %
      #    (test_name, test_keys['framework'], config['framework']))

      # Confirm required keys are present
      # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
      required = ['language','webserver','classification','database','approach','orm','framework','os','database_os']
      if not all(key in test_keys for key in required):
        raise Exception("benchmark_config for test %s is invalid - missing required keys" % test_name)

      # Map test type to a parsed FrameworkTestType object
      runTests = dict()
      for type_name, type_obj in benchmarker.types.iteritems():
        try:
          runTests[type_name] = type_obj.copy().parse(test_keys)
        except AttributeError as ae:
          # This is quite common - most tests don't support all types.
          # Quietly log it and move on (debug logging is on in travis and this causes
          # ~1500 lines of debug output, so it is ignored here for now).
          # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
          pass

      # We need to sort by test_type to run
      sortedTestKeys = sorted(runTests.keys(), key=testOrder)
      sortedRunTests = OrderedDict()
      for sortedTestKey in sortedTestKeys:
        sortedRunTests[sortedTestKey] = runTests[sortedTestKey]

      # By passing the entire set of keys, each FrameworkTest will have a member for each key
      tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))

  return tests
##############################################################
# End parse_config
##############################################################
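
##############################################################
# Illustrative input for parse_config (all values hypothetical): the config
# dict is expected to look roughly like the JSON below. The 'default' test is
# named after the framework itself and every other test becomes
# "<framework>-<name>".
#
#   {
#     "framework": "myframework",
#     "tests": [{
#       "default": {
#         "setup_file": "setup",
#         "json_url": "/json",
#         "port": 8080,
#         "language": "Python",
#         "webserver": "None",
#         "classification": "Micro",
#         "database": "None",
#         "approach": "Realistic",
#         "orm": "Raw",
#         "framework": "myframework",
#         "os": "Linux",
#         "database_os": "Linux"
#       }
#     }]
#   }
##############################################################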