# framework_test.py

from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *

import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math

from collections import OrderedDict
from threading import Thread
from threading import Event

from utils import header

# Cross-platform colored text
from colorama import Fore, Back, Style
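
# FrameworkTest drives one framework's benchmark run: start() launches the framework's
# setup script, verify_urls() checks each implemented test type, benchmark() generates
# the wrk scripts and runs them on the client machine, and the __parse_* helpers turn
# the raw wrk/dstat output into result dictionaries. parse_config() at the bottom of
# this file builds FrameworkTest instances from a parsed benchmark_config.json.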

class FrameworkTest:
  headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"

  # Used for test types that require no pipelining or query string params.
  concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """

  # Used for test types that require pipelining.
  pipeline_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """

  # Used for test types that require a database -
  # these tests run at a static concurrency level and vary the size of
  # the query sent with each request.
  query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """
  ############################################################
  # start(out, err)
  # Start the test using its setup file
  ############################################################
  def start(self, out, err):
    # Load profile for this installation
    profile = "$FWROOT/config/benchmark_profile"

    # Setup variables for TROOT and IROOT
    setup_util.replace_environ(config=profile,
      command='export TROOT=%s && export IROOT=%s && export DBHOST=%s && export MAX_THREADS=%s && export OUT=%s && export ERR=%s' %
      (self.directory, self.install_root, self.database_host, self.benchmarker.threads, os.path.join(self.fwroot, out.name), os.path.join(self.fwroot, err.name)))

    # Because start can take so long, periodically print a status line
    # so the user knows we are still working.
    class ProgressPrinterThread(Thread):
      def __init__(self, event):
        Thread.__init__(self)
        self.stopped = event

      def run(self):
        while not self.stopped.wait(20):
          sys.stderr.write("Waiting for start to return...\n")

    stopFlag = Event()
    thread = ProgressPrinterThread(stopFlag)
    thread.start()

    # Run the module start (inside parent of TROOT)
    # - we use the parent as a historical accident - a lot of tests
    #   use subprocess's cwd argument already
    previousDir = os.getcwd()
    os.chdir(os.path.dirname(self.troot))
    logging.info("Running setup module start (cwd=%s)", self.directory)

    # Write stderr to a temp file to be read and fed back
    # to the user via logging later.
    with open('temp', 'w') as errout:
      # Run the start script for the test as the "testrunner" user.
      # This requires superuser privs, so `sudo` is necessary.
      #   -u [username]  The username
      #   -E             Preserves the current environment variables
      #   -H             Forces the home var (~) to be reset to the user specified
      #   -e             Force bash to exit on first error
      # Note: check_call is a blocking call, so any startup scripts run by
      # the framework that need to keep running (read: the server has started
      # and needs to remain running) should be executed in the background.
      command = 'sudo -u %s -E -H bash -e %s.sh' % (self.benchmarker.runner_user, self.setup_file)

      debug_command = '''\
export FWROOT=%s && \\
export TROOT=%s && \\
export IROOT=%s && \\
export DBHOST=%s && \\
export MAX_THREADS=%s && \\
export OUT=%s && \\
export ERR=%s && \\
cd %s && \\
%s''' % (self.fwroot,
        self.directory,
        self.install_root,
        self.database_host,
        self.benchmarker.threads,
        os.path.join(self.fwroot, out.name),
        os.path.join(self.fwroot, err.name),
        self.directory,
        command)
      logging.info("To run framework manually, copy/paste this:\n%s", debug_command)

      try:
        subprocess.check_call(command, cwd=self.directory,
          shell=True, stderr=errout, stdout=out)
        retcode = 0
      except Exception:
        logging.exception("Failure running setup.sh")
        retcode = 1

    with open('temp', 'r') as errout:
      # Read out temp error output in its entirety
      body = errout.read()
      if len(body) > 0:
        # Log it to the user.
        logging.error(body)
        # Log it to our err.txt file
        err.write(body)
    # We are done with our temp file - delete it
    os.remove('temp')

    os.chdir(previousDir)

    # Stop the progress printer
    stopFlag.set()

    logging.info("Executed %s.sh", self.setup_file)

    return retcode
  ############################################################
  # End start
  ############################################################

  ############################################################
  # verify_urls
  # Verifies each of the URLs for this test. This will simply
  # curl the URL and check for its return status.
  # For each url, a flag will be set on this object for whether
  # or not it passed.
  # Returns True if all verifications succeeded
  ############################################################
  def verify_urls(self, out, err):
    result = True

    def verify_type(test_type):
      test = self.runTests[test_type]
      test.setup_out_err(out, err)
      out.write(header("VERIFYING %s" % test_type.upper()))

      base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)

      try:
        results = test.verify(base_url)
      except Exception as e:
        results = [('fail', """Caused Exception in TFB
          This almost certainly means your return value is incorrect,
          but also that you have found a bug. Please submit an issue
          including this message: %s\n%s""" % (e, traceback.format_exc()),
          base_url)]
        logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)

      test.failed = any(result == 'fail' for (result, reason, url) in results)
      test.warned = any(result == 'warn' for (result, reason, url) in results)
      test.passed = all(result == 'pass' for (result, reason, url) in results)

      def output_result(result, reason, url):
        specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
        prefix = Fore.CYAN
        color = Fore.GREEN
        showRules = False
        if result.upper() == "WARN":
          color = Fore.YELLOW
          showRules = True
        elif result.upper() == "FAIL":
          color = Fore.RED
          showRules = True

        out.write((prefix + " " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
        print (prefix + " " + color + "%s" + Style.RESET_ALL + " for %s") % (result.upper(), url)
        if reason is not None and len(reason) != 0:
          for line in reason.splitlines():
            out.write("  " + line + '\n')
            print "  " + line
          if showRules:
            out.write("  See %s\n" % specific_rules_url)
            print "  See %s" % specific_rules_url

      [output_result(r1, r2, url) for (r1, r2, url) in results]

      if test.failed:
        self.benchmarker.report_verify_results(self, test_type, 'fail')
      elif test.warned:
        self.benchmarker.report_verify_results(self, test_type, 'warn')
      elif test.passed:
        self.benchmarker.report_verify_results(self, test_type, 'pass')
      else:
        raise Exception("Unknown error - test did not pass, warn, or fail")

    result = True
    for test_type in self.runTests:
      verify_type(test_type)
      if self.runTests[test_type].failed:
        result = False

    return result
  ############################################################
  # End verify_urls
  ############################################################
  ############################################################
  # benchmark
  # Runs the benchmark for each type of test that it implements
  # JSON/DB/Query.
  ############################################################
  def benchmark(self, out, err):

    def benchmark_type(test_type):
      out.write("BENCHMARKING %s ... " % test_type.upper())

      test = self.runTests[test_type]
      test.setup_out_err(out, err)
      output_file = self.benchmarker.output_file(self.name, test_type)
      if not os.path.exists(output_file):
        # Open to create the empty file
        with open(output_file, 'w'):
          pass

      if not test.failed:
        if test_type == 'plaintext': # One special case
          remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
        elif test_type == 'query' or test_type == 'update':
          remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
        else:
          remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)

        # Begin resource usage metrics collection
        self.__begin_logging(test_type)

        # Run the benchmark
        with open(output_file, 'w') as raw_file:
          p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
          p.communicate(remote_script)
          err.flush()

        # End resource usage metrics collection
        self.__end_logging()

      results = self.__parse_test(test_type)
      print "Benchmark results:"
      pprint(results)

      self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
      out.write("Complete\n")
      out.flush()

    for test_type in self.runTests:
      benchmark_type(test_type)
  ############################################################
  # End benchmark
  ############################################################
  ############################################################
  # parse_all
  # Re-parses the raw benchmark output for every test type that
  # has results on disk (meant to be run against a given
  # timestamp's results).
  ############################################################
  def parse_all(self):
    for test_type in self.runTests:
      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        results = self.__parse_test(test_type)
        self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])

  ############################################################
  # __parse_test(test_type)
  ############################################################
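  # Each concurrency or query level contributes one dict to results['results'].
  # Illustrative entry (hypothetical values):
  #   {'latencyAvg': '10.25ms', 'latencyStdev': '5.02ms', 'latencyMax': '80.01ms',
  #    'totalRequests': 1234567, 'startTime': 1400000000, 'endTime': 1400000015}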
  def __parse_test(self, test_type):
    try:
      results = dict()
      results['results'] = []
      stats = []

      if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
        with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
          is_warmup = True
          rawData = None
          for line in raw_data:

            if "Queries:" in line or "Concurrency:" in line:
              is_warmup = False
              rawData = None
              continue
            if "Warmup" in line or "Primer" in line:
              is_warmup = True
              continue

            if not is_warmup:
              if rawData is None:
                rawData = dict()
                results['results'].append(rawData)

              #if "Requests/sec:" in line:
              #  m = re.search("Requests/sec:\s+([0-9]+)", line)
              #  rawData['reportedResults'] = m.group(1)

              # search for weighttp data such as succeeded and failed.
              if "Latency" in line:
                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                if len(m) == 4:
                  rawData['latencyAvg'] = m[0]
                  rawData['latencyStdev'] = m[1]
                  rawData['latencyMax'] = m[2]
                  # rawData['latencyStdevPercent'] = m[3]

              #if "Req/Sec" in line:
              #  m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
              #  if len(m) == 4:
              #    rawData['requestsAvg'] = m[0]
              #    rawData['requestsStdev'] = m[1]
              #    rawData['requestsMax'] = m[2]
              #    rawData['requestsStdevPercent'] = m[3]

              #if "requests in" in line:
              #  m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
              #  if m != None:
              #    # parse out the raw time, which may be in minutes or seconds
              #    raw_time = m.group(1)
              #    if "ms" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
              #    elif "s" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1])
              #    elif "m" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
              #    elif "h" in raw_time:
              #      rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

              if "requests in" in line:
                m = re.search("([0-9]+) requests in", line)
                if m is not None:
                  rawData['totalRequests'] = int(m.group(1))

              if "Socket errors" in line:
                if "connect" in line:
                  m = re.search("connect ([0-9]+)", line)
                  rawData['connect'] = int(m.group(1))
                if "read" in line:
                  m = re.search("read ([0-9]+)", line)
                  rawData['read'] = int(m.group(1))
                if "write" in line:
                  m = re.search("write ([0-9]+)", line)
                  rawData['write'] = int(m.group(1))
                if "timeout" in line:
                  m = re.search("timeout ([0-9]+)", line)
                  rawData['timeout'] = int(m.group(1))

              if "Non-2xx" in line:
                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                if m is not None:
                  rawData['5xx'] = int(m.group(1))

              if "STARTTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["startTime"] = int(m.group(0))
              if "ENDTIME" in line:
                m = re.search("[0-9]+", line)
                rawData["endTime"] = int(m.group(0))
                test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
                # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
                stats.append(test_stats)

      with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
        json.dump(stats, stats_file, indent=2)

      return results
    except IOError:
      return None
  ############################################################
  # End __parse_test
  ############################################################
  ##########################################################################################
  # Private Methods
  ##########################################################################################

  ############################################################
  # __generate_concurrency_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable concurrency tests (JSON
  # and DB)
  ############################################################
  def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
    return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)

  ############################################################
  # __generate_pipeline_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single pipeline test.
  ############################################################
  def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
    headers = self.headers_template.format(accept=accept_header)
    return self.pipeline_template.format(max_concurrency=16384,
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in [256, 1024, 4096, 16384]),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
      pipeline=16)

  ############################################################
  # __generate_query_script(url, port)
  # Generates the string containing the bash script that will
  # be run on the client to benchmark a single test. This
  # specifically works for the variable query tests (Query)
  ############################################################
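  # query_template appends the loop variable to {url}; with a test url such as
  # "/queries?queries=" (hypothetical) and query levels of 1 5 10 15 20, each
  # iteration requests http://{server_host}:{port}/queries?queries=$c.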
  def __generate_query_script(self, url, port, accept_header):
    headers = self.headers_template.format(accept=accept_header)
    return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
      max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
      levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
      server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
  ############################################################
  # Returns True if any test type of this framework test will use a DB
  ############################################################
  def requires_database(self):
    '''Returns True/False if this test requires a database'''
    return any(tobj.requires_db for (ttype, tobj) in self.runTests.iteritems())

  ############################################################
  # __begin_logging
  # Starts a dstat subprocess to monitor resource usage, to be synced with the client's time
  # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
  ############################################################
  def __begin_logging(self, test_type):
    output_file = "{file_name}".format(file_name=self.benchmarker.get_stats_file(self.name, test_type))
    dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
      --udp --unix --vm --disk-util \
      --rpc --rpcd --output {output_file}".format(output_file=output_file)
    cmd = shlex.split(dstat_string)
    dev_null = open(os.devnull, "w")
    self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
  ##############################################################
  # __end_logging
  # Stops the dstat logging subprocess and blocks until shutdown
  # is complete.
  ##############################################################
  def __end_logging(self):
    self.subprocess_handle.terminate()
    self.subprocess_handle.communicate()

  ##############################################################
  # __parse_stats
  # For each test type, process all the statistics, and return a multi-layered dictionary
  # that has a structure as follows:
  # (timestamp)
  # | (main header) - group that the stat is in
  # | | (sub header) - title of the stat
  # | | | (stat) - the stat itself, usually a floating point number
  ##############################################################
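  # Illustrative entry (header names depend on the dstat version in use):
  #   stats_dict[1400000000.0]['total cpu usage']['idl'] == 97.0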
  def __parse_stats(self, test_type, start_time, end_time, interval):
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
      while(stats.next() != "\n"): # dstat doesn't output a completely compliant CSV file - we need to strip the header
        pass
      stats_reader = csv.reader(stats)
      main_header = stats_reader.next()
      sub_header = stats_reader.next()
      time_row = sub_header.index("epoch")
      int_counter = 0
      for row in stats_reader:
        time = float(row[time_row])
        int_counter += 1
        if time < start_time:
          continue
        elif time > end_time:
          return stats_dict
        if int_counter % interval != 0:
          continue
        row_dict = dict()
        for nextheader in main_header:
          if nextheader != "":
            row_dict[nextheader] = dict()
        header = ""
        for item_num, column in enumerate(row):
          if(len(main_header[item_num]) != 0):
            header = main_header[item_num]
          row_dict[header][sub_header[item_num]] = float(column) # all the stats are numbers, so we want to make sure that they stay that way in json
        stats_dict[time] = row_dict
    return stats_dict
  ##############################################################
  # End __parse_stats
  ##############################################################
  def __getattr__(self, name):
    """For backwards compatibility, we used to pass benchmarker
    as the argument to the setup.sh files"""
    try:
      x = getattr(self.benchmarker, name)
    except AttributeError:
      print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
      print "This is probably a bug"
      raise
    return x
  ##############################################################
  # Begin __calculate_average_stats
  # We have a large amount of raw data for the statistics that
  # may be useful for the stats nerds, but most people care about
  # a couple of numbers. For now, we're only going to supply:
  #   * Average CPU
  #   * Average Memory
  #   * Total network use
  #   * Total disk use
  # More may be added in the future. If they are, please update
  # the above list.
  # Note: raw_stats is directly from the __parse_stats method.
  # Recall that this consists of a dictionary of timestamps,
  # each of which contains a dictionary of stat categories which
  # contain a dictionary of stats
  ##############################################################
  def __calculate_average_stats(self, raw_stats):
    raw_stat_collection = dict()

    for timestamp, time_dict in raw_stats.items():
      for main_header, sub_headers in time_dict.items():
        item_to_append = None
        if 'cpu' in main_header:
          # We want to take the idl stat and subtract it from 100
          # to get the time that the CPU is NOT idle.
          item_to_append = 100.0 - sub_headers['idl']
        elif main_header == 'memory usage':
          item_to_append = sub_headers['used']
        elif 'net' in main_header:
          # Network stats have two parts - receive and send. We'll use a tuple of
          # style (receive, send)
          item_to_append = (sub_headers['recv'], sub_headers['send'])
        elif 'dsk' in main_header or 'io' in main_header:
          # Similar to network, except the tuple looks like (read, write)
          item_to_append = (sub_headers['read'], sub_headers['writ'])

        if item_to_append is not None:
          if main_header not in raw_stat_collection:
            raw_stat_collection[main_header] = list()
          raw_stat_collection[main_header].append(item_to_append)

    # Simple function to determine human readable size
    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    def sizeof_fmt(num):
      # We'll assume that any number we get is convertible to a float, just in case
      num = float(num)
      for x in ['bytes', 'KB', 'MB', 'GB']:
        if num < 1024.0 and num > -1024.0:
          return "%3.1f%s" % (num, x)
        num /= 1024.0
      return "%3.1f%s" % (num, 'TB')

    # Now we have our raw stats in a readable format - we need to format it for display
    # We need a floating point sum, so the built-in sum doesn't cut it
    display_stat_collection = dict()
    for header, values in raw_stat_collection.items():
      display_stat = None
      if 'cpu' in header:
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif header == 'memory usage':
        display_stat = sizeof_fmt(math.fsum(values) / len(values))
      elif 'net' in header:
        receive, send = zip(*values) # unzip
        display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
      else: # 'dsk' or 'io' in header
        read, write = zip(*values) # unzip
        display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
      display_stat_collection[header] = display_stat
    return display_stat_collection
  ###########################################################################################
  # End __calculate_average_stats
  #########################################################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################
  def __init__(self, name, directory, benchmarker, runTests, args):
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot

    self.approach = ""
    self.classification = ""
    self.database = ""
    self.framework = ""
    self.language = ""
    self.orm = ""
    self.platform = ""
    self.webserver = ""
    self.os = ""
    self.database_os = ""
    self.display_name = ""
    self.notes = ""
    self.versus = ""

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if benchmarker.install_strategy == 'pertest':
      self.install_root = "%s/pertest/%s" % (self.install_root, name)

    # Used in setup.sh scripts for consistency with
    # the bash environment variables
    self.troot = self.directory
    self.iroot = self.install_root

    self.__dict__.update(args)
  ############################################################
  # End __init__
  ############################################################
############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
  tests = []

  # This sort ordering is set up specifically to return the length
  # of the test name. There were SO many problems involved with
  # 'plaintext' being run first (rather, just not last) that we
  # needed to ensure that it was run last for every framework.
  def testOrder(type_name):
    return len(type_name)
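  # Example: sorted(['plaintext', 'json', 'db'], key=testOrder) gives
  # ['db', 'json', 'plaintext'], so 'plaintext' always sorts last.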

  # The config object can specify multiple tests
  # Loop over them and parse each into a FrameworkTest
  for test in config['tests']:
    names = [name for (name, keys) in test.iteritems()]
    if "default" not in names:
      logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework'])
    for test_name, test_keys in test.iteritems():
      # Prefix all test names with framework except 'default' test
      if test_name == 'default':
        test_name = config['framework']
      else:
        test_name = "%s-%s" % (config['framework'], test_name)

      # Ensure FrameworkTest.framework is available
      if not test_keys.get('framework'):
        test_keys['framework'] = config['framework']
      #if test_keys['framework'].lower() != config['framework'].lower():
      #  print Exception("benchmark_config.json for test %s is invalid - test framework '%s' must match benchmark_config.json framework '%s'" %
      #    (test_name, test_keys['framework'], config['framework']))

      # Confirm required keys are present
      # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
      required = ['language', 'webserver', 'classification', 'database', 'approach', 'orm', 'framework', 'os', 'database_os']
      if not all(key in test_keys for key in required):
        raise Exception("benchmark_config.json for test %s is invalid - missing required keys" % test_name)

      # Map test type to a parsed FrameworkTestType object
      runTests = dict()
      for type_name, type_obj in benchmarker.types.iteritems():
        try:
          runTests[type_name] = type_obj.copy().parse(test_keys)
        except AttributeError:
          # This is quite common - most tests don't support all types.
          # Quietly log it and move on (debug logging is on in travis and this causes
          # ~1500 lines of debug, so I'm totally ignoring it for now).
          # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
          pass

      # We need to sort by test_type to run
      sortedTestKeys = sorted(runTests.keys(), key=testOrder)
      sortedRunTests = OrderedDict()
      for sortedTestKey in sortedTestKeys:
        sortedRunTests[sortedTestKey] = runTests[sortedTestKey]

      # By passing the entire set of keys, each FrameworkTest will have a member for each key
      tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))

  return tests
##############################################################
# End parse_config
##############################################################