
from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *

import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math

from collections import OrderedDict
from threading import Thread
from threading import Event

from utils import header
class FrameworkTest:
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"

    # Used for test types that require no pipelining or query string params.
    concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """
    # Used for test types that require pipelining.
    pipeline_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Concurrency: $c for {name}"
      echo " {wrk} {headers} -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      {wrk} {headers} -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """
    # Used for test types that require a database.
    # These tests run at a static concurrency level and vary the number
    # of queries requested with each URL.
    query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5

    echo ""
    echo "---------------------------------------------------------"
    echo " Synchronizing time"
    echo "---------------------------------------------------------"
    echo ""
    ntpdate -s pool.ntp.org

    for c in {levels}
    do
      echo ""
      echo "---------------------------------------------------------"
      echo " Queries: $c for {name}"
      echo " wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
      echo "---------------------------------------------------------"
      echo ""
      STARTTIME=$(date +"%s")
      wrk {headers} -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
      echo "STARTTIME $STARTTIME"
      echo "ENDTIME $(date +"%s")"
      sleep 2
    done
    """
    ############################################################
    # start(out, err)
    # Start the test using its setup file
    ############################################################
    def start(self, out, err):
        # Load profile for this installation
        profile = "%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
            profile = "$FWROOT/config/benchmark_profile"

        # Set up environment variables for TROOT and IROOT
        setup_util.replace_environ(config=profile,
            command='export TROOT=%s && export IROOT=%s && export DBHOST=%s && export MAX_THREADS=%s' %
            (self.directory, self.install_root, self.database_host, self.benchmarker.threads))

        # Because start can take so long, periodically print a status
        # line so the user knows we are still working
        class ProgressPrinterThread(Thread):
            def __init__(self, event):
                Thread.__init__(self)
                self.stopped = event

            def run(self):
                while not self.stopped.wait(20):
                    sys.stderr.write("Waiting for start to return...\n")

        stopFlag = Event()
        thread = ProgressPrinterThread(stopFlag)
        thread.start()

        # Run the module start (inside the parent of TROOT)
        # - we use the parent as a historical accident; a lot of tests
        #   already use subprocess's cwd argument
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module start (cwd=%s)", os.path.dirname(self.directory))

        # Run the start script for the test as the "testrunner" user.
        # This requires superuser privileges, so `sudo` is necessary.
        #   -u [username]  Run as the given user
        #   -E             Preserve the current environment variables
        #   -H             Reset the home directory (~) to the specified user's
        # Note: check_call is a blocking call, so any startup script
        # that needs to keep running (read: the server has started and
        # must stay up) should launch its processes in the background.
        try:
            retcode = subprocess.check_call('sudo -u %s -E -H ./%s.sh' %
                (self.benchmarker.runner_user, self.setup_file),
                cwd=self.directory, shell=True, stderr=err, stdout=out)
            if retcode is None:
                retcode = 0
        except Exception:
            retcode = 1
            st = traceback.format_exc()
            st = '\n'.join((4 * ' ') + x for x in st.splitlines())
            st = "Start exception:\n%s" % st
            logging.info(st)
            err.write(st + '\n')
        os.chdir(previousDir)

        # Stop the progress printer
        stopFlag.set()

        logging.info("Called setup.py start")
        return retcode
    ############################################################
    # End start
    ############################################################
    ############################################################
    # stop(out, err)
    # Stops the test using its setup file
    ############################################################
    def stop(self, out, err):
        # Load profile for this installation
        profile = "%s/bash_profile.sh" % self.directory
        if not os.path.exists(profile):
            logging.warning("Directory %s does not have a bash_profile.sh" % self.directory)
            profile = "$FWROOT/config/benchmark_profile"

        setup_util.replace_environ(config=profile,
            command='export TROOT=%s && export IROOT=%s' %
            (self.directory, self.install_root))

        # Run the module stop (inside the parent of TROOT)
        # - we use the parent as a historical accident; a lot of tests
        #   already use subprocess's cwd argument
        previousDir = os.getcwd()
        os.chdir(os.path.dirname(self.troot))
        logging.info("Running setup module stop (cwd=%s)", os.path.dirname(self.directory))

        # Meganuke: SIGKILL every process owned by the runner user
        try:
            subprocess.check_call('sudo killall -s 9 -u %s' % self.benchmarker.runner_user, shell=True, stderr=err, stdout=out)
            retcode = 0
        except Exception:
            retcode = 1

        os.chdir(previousDir)

        # Give the killed processes a moment to die and release their ports
        time.sleep(5)
        return retcode
    ############################################################
    # End stop
    ############################################################
    ############################################################
    # verify_urls
    # Verifies each of the URLs for this test. This will simply
    # curl the URL and check its return status. For each URL, a
    # flag is set on this object indicating whether or not it
    # passed.
    # Returns True if all verifications succeeded
    ############################################################
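    # Each test type's verify() is expected to return a list of
    # (result, reason, url) tuples; a hypothetical example:
    #   [('pass', '', 'http://10.0.0.1:8080/json'),
    #    ('warn', 'Content-Length header missing', 'http://10.0.0.1:8080/json')]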
    def verify_urls(self, out, err):
        def verify_type(test_type):
            test = self.runTests[test_type]
            test.setup_out_err(out, err)
            out.write(header("VERIFYING %s" % test_type.upper()))

            base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)

            try:
                results = test.verify(base_url)
            except Exception as e:
                results = [('fail', """Caused Exception in TFB
                    This almost certainly means your return value is incorrect,
                    but also that you have found a bug. Please submit an issue
                    including this message: %s\n%s""" % (e, traceback.format_exc()),
                    base_url)]
                logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)

            test.failed = any(result == 'fail' for (result, reason, url) in results)
            test.warned = any(result == 'warn' for (result, reason, url) in results)
            test.passed = all(result == 'pass' for (result, reason, url) in results)

            def output_result(result, reason, url):
                out.write("   %s for %s\n" % (result.upper(), url))
                print "   %s for %s" % (result.upper(), url)
                if reason is not None and len(reason) != 0:
                    for line in reason.splitlines():
                        out.write("     " + line + '\n')
                        print "     " + line

            for (result, reason, url) in results:
                output_result(result, reason, url)

            if test.failed:
                self.benchmarker.report_verify_results(self, test_type, 'fail')
            elif test.warned:
                self.benchmarker.report_verify_results(self, test_type, 'warn')
            elif test.passed:
                self.benchmarker.report_verify_results(self, test_type, 'pass')
            else:
                raise Exception("Unknown error - test did not pass, warn, or fail")

        result = True
        for test_type in self.runTests:
            verify_type(test_type)
            if self.runTests[test_type].failed:
                result = False
        return result
    ############################################################
    # End verify_urls
    ############################################################

    ############################################################
    # benchmark
    # Runs the benchmark for each type of test that it implements
    # JSON/DB/Query.
    ############################################################
    def benchmark(self, out, err):
        def benchmark_type(test_type):
            out.write("BENCHMARKING %s ... " % test_type.upper())

            test = self.runTests[test_type]
            test.setup_out_err(out, err)
            output_file = self.benchmarker.output_file(self.name, test_type)
            if not os.path.exists(output_file):
                # Open to create the empty file
                with open(output_file, 'w'):
                    pass

            if not test.failed:
                if test_type == 'plaintext':  # One special case
                    remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
                elif test_type == 'query' or test_type == 'update':
                    remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
                else:
                    remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)

                # Begin resource usage metrics collection
                self.__begin_logging(test_type)

                # Run the benchmark
                with open(output_file, 'w') as raw_file:
                    p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
                    p.communicate(remote_script)
                    err.flush()

                # End resource usage metrics collection
                self.__end_logging()

            results = self.__parse_test(test_type)
            print "Benchmark results:"
            pprint(results)

            self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
            out.write("Complete\n")
            out.flush()

        for test_type in self.runTests:
            benchmark_type(test_type)
    ############################################################
    # End benchmark
    ############################################################
    ############################################################
    # parse_all
    # Method meant to be run for a given timestamp
    ############################################################
    def parse_all(self):
        for test_type in self.runTests:
            if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
                results = self.__parse_test(test_type)
                self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])

    ############################################################
    # __parse_test(test_type)
    ############################################################
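    # __parse_test scans the raw wrk output captured above. The lines it
    # looks for follow wrk's standard report format, e.g. (illustrative
    # sample, not from a real run):
    #   Latency   631.91us    1.22ms   22.63ms   98.11%
    #   123456 requests in 15.00s, 17.05MB read
    #   Socket errors: connect 0, read 12, write 0, timeout 3
    #   Non-2xx or 3xx responses: 7
    # plus the STARTTIME/ENDTIME markers echoed by the generated script.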
    def __parse_test(self, test_type):
        try:
            results = dict()
            results['results'] = []
            stats = []

            if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
                with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
                    is_warmup = True
                    rawData = None
                    for line in raw_data:
                        if "Queries:" in line or "Concurrency:" in line:
                            is_warmup = False
                            rawData = None
                            continue
                        if "Warmup" in line or "Primer" in line:
                            is_warmup = True
                            continue

                        if not is_warmup:
                            if rawData is None:
                                rawData = dict()
                                results['results'].append(rawData)

                            # if "Requests/sec:" in line:
                            #     m = re.search("Requests/sec:\s+([0-9]+)", line)
                            #     rawData['reportedResults'] = m.group(1)

                            # search for weighttp data such as succeeded and failed.
                            if "Latency" in line:
                                m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                                if len(m) == 4:
                                    rawData['latencyAvg'] = m[0]
                                    rawData['latencyStdev'] = m[1]
                                    rawData['latencyMax'] = m[2]
                                    # rawData['latencyStdevPercent'] = m[3]

                            # if "Req/Sec" in line:
                            #     m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
                            #     if len(m) == 4:
                            #         rawData['requestsAvg'] = m[0]
                            #         rawData['requestsStdev'] = m[1]
                            #         rawData['requestsMax'] = m[2]
                            #         rawData['requestsStdevPercent'] = m[3]

                            # if "requests in" in line:
                            #     m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
                            #     if m is not None:
                            #         # parse out the raw time, which may be in minutes or seconds
                            #         raw_time = m.group(1)
                            #         if "ms" in raw_time:
                            #             rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
                            #         elif "s" in raw_time:
                            #             rawData['total_time'] = float(raw_time[:len(raw_time)-1])
                            #         elif "m" in raw_time:
                            #             rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
                            #         elif "h" in raw_time:
                            #             rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0

                            if "requests in" in line:
                                m = re.search("([0-9]+) requests in", line)
                                if m is not None:
                                    rawData['totalRequests'] = int(m.group(1))

                            if "Socket errors" in line:
                                if "connect" in line:
                                    m = re.search("connect ([0-9]+)", line)
                                    rawData['connect'] = int(m.group(1))
                                if "read" in line:
                                    m = re.search("read ([0-9]+)", line)
                                    rawData['read'] = int(m.group(1))
                                if "write" in line:
                                    m = re.search("write ([0-9]+)", line)
                                    rawData['write'] = int(m.group(1))
                                if "timeout" in line:
                                    m = re.search("timeout ([0-9]+)", line)
                                    rawData['timeout'] = int(m.group(1))

                            if "Non-2xx" in line:
                                m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
                                if m is not None:
                                    rawData['5xx'] = int(m.group(1))

                            if "STARTTIME" in line:
                                m = re.search("[0-9]+", line)
                                rawData["startTime"] = int(m.group(0))
                            if "ENDTIME" in line:
                                m = re.search("[0-9]+", line)
                                rawData["endTime"] = int(m.group(0))
                                test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
                                # rawData["averageStats"] = self.__calculate_average_stats(test_stats)
                                stats.append(test_stats)

            with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
                json.dump(stats, stats_file, indent=2)

            return results
        except IOError:
            return None
    ############################################################
    # End __parse_test
    ############################################################
    ##########################################################################################
    # Private Methods
    ##########################################################################################

    ############################################################
    # __generate_concurrency_script(url, port, accept_header)
    # Generates the string containing the bash script that will
    # be run on the client to benchmark a single test. This
    # specifically works for the variable concurrency tests (JSON
    # and DB)
    ############################################################
    def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
        headers = self.headers_template.format(accept=accept_header)
        return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
            max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
            levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
            server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)

    ############################################################
    # __generate_pipeline_script(url, port, accept_header)
    # Generates the string containing the bash script that will
    # be run on the client to benchmark a single pipeline test.
    ############################################################
    def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
        headers = self.headers_template.format(accept=accept_header)
        return self.pipeline_template.format(max_concurrency=16384,
            max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
            levels=" ".join("{}".format(item) for item in [256, 1024, 4096, 16384]),
            server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
            pipeline=16)

    ############################################################
    # __generate_query_script(url, port, accept_header)
    # Generates the string containing the bash script that will
    # be run on the client to benchmark a single test. This
    # specifically works for the variable query tests (Query)
    ############################################################
    def __generate_query_script(self, url, port, accept_header):
        headers = self.headers_template.format(accept=accept_header)
        return self.query_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
            max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
            levels=" ".join("{}".format(item) for item in self.benchmarker.query_levels),
            server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)
    ############################################################
    # Returns True if any test type in this framework test
    # requires a database
    ############################################################
    def requires_database(self):
        '''Returns True/False if this test requires a database'''
        return any(tobj.requires_db for (ttype, tobj) in self.runTests.iteritems())
    ############################################################
    # __begin_logging
    # Starts a dstat subprocess to monitor resource usage, to be
    # synced with the client's time
    # TODO: MySQL and InnoDB are possible. Figure out how to implement them.
    ############################################################
    def __begin_logging(self, test_type):
        output_file = self.benchmarker.get_stats_file(self.name, test_type)
        dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock \
                --raw --socket --tcp --udp --unix --vm --disk-util \
                --rpc --rpcd --output {output_file}".format(output_file=output_file)
        cmd = shlex.split(dstat_string)
        dev_null = open(os.devnull, "w")
        self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)

    ##############################################################
    # __end_logging
    # Stops the dstat subprocess and blocks until it has exited.
    ##############################################################
    def __end_logging(self):
        self.subprocess_handle.terminate()
        self.subprocess_handle.communicate()
    ##############################################################
    # __parse_stats
    # For each test type, process all the statistics and return a
    # multi-layered dictionary with the following structure:
    # (timestamp)
    # | (main header) - group that the stat is in
    # | | (sub header) - title of the stat
    # | | | (stat) - the stat itself, usually a floating point number
    ##############################################################
    def __parse_stats(self, test_type, start_time, end_time, interval):
        stats_dict = dict()
        stats_file = self.benchmarker.stats_file(self.name, test_type)
        with open(stats_file) as stats:
            # dstat doesn't output a completely compliant CSV file,
            # so we need to strip its informational header first
            while stats.next() != "\n":
                pass
            stats_reader = csv.reader(stats)
            main_header = stats_reader.next()
            sub_header = stats_reader.next()
            time_row = sub_header.index("epoch")
            int_counter = 0
            for row in stats_reader:
                time = float(row[time_row])
                int_counter += 1
                if time < start_time:
                    continue
                elif time > end_time:
                    return stats_dict
                if int_counter % interval != 0:
                    continue

                row_dict = dict()
                for nextheader in main_header:
                    if nextheader != "":
                        row_dict[nextheader] = dict()
                header = ""
                for item_num, column in enumerate(row):
                    if len(main_header[item_num]) != 0:
                        header = main_header[item_num]
                    # all the stats are numbers, so we want to make sure
                    # that they stay that way in json
                    row_dict[header][sub_header[item_num]] = float(column)
                stats_dict[time] = row_dict
        return stats_dict
    ##############################################################
    # End __parse_stats
    ##############################################################
    def __getattr__(self, name):
        """For backwards compatibility, we used to pass benchmarker
        as the argument to the setup.py files"""
        try:
            x = getattr(self.benchmarker, name)
        except AttributeError:
            print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
            print "This is probably a bug"
            raise
        return x
    ##############################################################
    # __calculate_average_stats
    # We have a large amount of raw data for the statistics that
    # may be useful for the stats nerds, but most people care about
    # a couple of numbers. For now, we're only going to supply:
    # * Average CPU
    # * Average Memory
    # * Total network use
    # * Total disk use
    # More may be added in the future. If they are, please update
    # the above list.
    # Note: raw_stats comes directly from the __parse_stats method.
    # Recall that it consists of a dictionary of timestamps, each of
    # which contains a dictionary of stat categories, which in turn
    # contain a dictionary of stats
    ##############################################################
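    # For example (hypothetical values), given
    #   raw_stats = {1400000000.0: {'total cpu usage': {'usr': 4.0, 'idl': 95.0},
    #                               'memory usage': {'used': 1234567.0}}}
    # the CPU entry collected below would be 100.0 - 95.0 = 5.0.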
    def __calculate_average_stats(self, raw_stats):
        raw_stat_collection = dict()

        for timestamp, time_dict in raw_stats.items():
            for main_header, sub_headers in time_dict.items():
                item_to_append = None
                if 'cpu' in main_header:
                    # We want to take the idl stat and subtract it from 100
                    # to get the time that the CPU is NOT idle.
                    item_to_append = 100.0 - sub_headers['idl']
                elif main_header == 'memory usage':
                    item_to_append = sub_headers['used']
                elif 'net' in main_header:
                    # Network stats have two parts - receive and send. We'll
                    # use a tuple of style (receive, send)
                    item_to_append = (sub_headers['recv'], sub_headers['send'])
                elif 'dsk' in main_header or 'io' in main_header:
                    # Similar to network, except the tuple looks like (read, write)
                    item_to_append = (sub_headers['read'], sub_headers['writ'])

                if item_to_append is not None:
                    if main_header not in raw_stat_collection:
                        raw_stat_collection[main_header] = list()
                    raw_stat_collection[main_header].append(item_to_append)
        # Simple function to determine human readable size
        # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
        def sizeof_fmt(num):
            # We'll assume that any number we get is convertible to a float, just in case
            num = float(num)
            for x in ['bytes', 'KB', 'MB', 'GB']:
                if num < 1024.0 and num > -1024.0:
                    return "%3.1f%s" % (num, x)
                num /= 1024.0
            return "%3.1f%s" % (num, 'TB')
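        # e.g. sizeof_fmt(123456) returns "120.6KB"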
        # Now we have our raw stats in a readable format - we need to format it for display.
        # We use math.fsum for a precise floating point sum.
        display_stat_collection = dict()
        for header, values in raw_stat_collection.items():
            display_stat = None
            if 'cpu' in header:
                # Average CPU utilization, as a percentage
                display_stat = "%3.1f%%" % (math.fsum(values) / len(values))
            elif header == 'memory usage':
                display_stat = sizeof_fmt(math.fsum(values) / len(values))
            elif 'net' in header:
                receive, send = zip(*values)  # unzip
                display_stat = {'receive': sizeof_fmt(math.fsum(receive)), 'send': sizeof_fmt(math.fsum(send))}
            else:  # 'dsk' or 'io' headers
                read, write = zip(*values)  # unzip
                display_stat = {'read': sizeof_fmt(math.fsum(read)), 'write': sizeof_fmt(math.fsum(write))}
            display_stat_collection[header] = display_stat
        return display_stat_collection
    ##########################################################################################
    # End __calculate_average_stats
    ##########################################################################################
    ##########################################################################################
    # Constructor
    ##########################################################################################
    def __init__(self, name, directory, benchmarker, runTests, args):
        self.name = name
        self.directory = directory
        self.benchmarker = benchmarker
        self.runTests = runTests
        self.fwroot = benchmarker.fwroot

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        self.install_root = "%s/%s" % (self.fwroot, "installs")
        if benchmarker.install_strategy == 'pertest':
            self.install_root = "%s/pertest/%s" % (self.install_root, name)

        # Used in setup.py scripts for consistency with
        # the bash environment variables
        self.troot = self.directory
        self.iroot = self.install_root

        self.__dict__.update(args)

        # ensure the directory has an __init__.py file so that we can use it as a Python package
        if not os.path.exists(os.path.join(directory, "__init__.py")):
            logging.warning("Please add an empty __init__.py file to directory %s", directory)
            open(os.path.join(directory, "__init__.py"), 'w').close()

        # Import the module (TODO - consider using sys.meta_path)
        # Note: You can see the log output if you really want to, but it's a *ton*
        dir_rel_to_fwroot = os.path.relpath(os.path.dirname(directory), self.fwroot)
        if dir_rel_to_fwroot != ".":
            sys.path.append("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
            logging.log(0, "Adding %s to import %s.%s", dir_rel_to_fwroot, os.path.basename(directory), self.setup_file)
            # self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
            sys.path.remove("%s/%s" % (self.fwroot, dir_rel_to_fwroot))
        else:
            logging.log(0, "Importing %s.%s", directory, self.setup_file)
            # self.setup_module = setup_module = importlib.import_module(os.path.basename(directory) + '.' + self.setup_file)
    ############################################################
    # End __init__
    ############################################################
############################################################
# End FrameworkTest
############################################################

##########################################################################################
# Static methods
##########################################################################################

##############################################################
# parse_config(config, directory, benchmarker)
# Parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
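# A minimal sketch of a benchmark_config that this parser accepts
# (all values are illustrative):
#   {
#     "framework": "example",
#     "tests": [{
#       "default": {
#         "json_url": "/json",
#         "port": 8080,
#         "language": "Python", "webserver": "None",
#         "classification": "Micro", "database": "None",
#         "approach": "Realistic", "orm": "Raw",
#         "framework": "example", "os": "Linux",
#         "database_os": "Linux"
#       }
#     }]
#   }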
def parse_config(config, directory, benchmarker):
    tests = []

    # This sort ordering is set up specifically to return the length
    # of the test name. There were SO many problems involved with
    # 'plaintext' being run first (rather, just not last) that we
    # needed to ensure that it was run last for every framework.
    def testOrder(type_name):
        return len(type_name)

    # The config object can specify multiple tests
    # Loop over them and parse each into a FrameworkTest
    for test in config['tests']:
        for test_name, test_keys in test.iteritems():
            # Prefix all test names with framework except the 'default' test
            if test_name == 'default':
                test_name = config['framework']
            else:
                test_name = "%s-%s" % (config['framework'], test_name)

            # Ensure FrameworkTest.framework is available
            if not test_keys.get('framework'):
                test_keys['framework'] = config['framework']
            # if test_keys['framework'].lower() != config['framework'].lower():
            #     print Exception("benchmark_config for test %s is invalid - test framework '%s' must match benchmark_config framework '%s'" %
            #         (test_name, test_keys['framework'], config['framework']))

            # Confirm required keys are present
            # TODO have a TechEmpower person confirm this list - I don't know what the website requires....
            required = ['language', 'webserver', 'classification', 'database', 'approach', 'orm', 'framework', 'os', 'database_os']
            if not all(key in test_keys for key in required):
                raise Exception("benchmark_config for test %s is invalid - missing required keys" % test_name)

            # Map test type to a parsed FrameworkTestType object
            runTests = dict()
            for type_name, type_obj in benchmarker.types.iteritems():
                try:
                    runTests[type_name] = type_obj.copy().parse(test_keys)
                except AttributeError:
                    # This is quite common - most tests don't support all types.
                    # Quietly log it and move on (debug logging is on in travis and
                    # this causes ~1500 lines of debug, so I'm totally ignoring it for now)
                    # logging.debug("Missing arguments for test type %s for framework test %s", type_name, test_name)
                    pass

            # We need to sort by test_type to run
            sortedTestKeys = sorted(runTests.keys(), key=testOrder)
            sortedRunTests = OrderedDict()
            for sortedTestKey in sortedTestKeys:
                sortedRunTests[sortedTestKey] = runTests[sortedTestKey]

            # By passing the entire set of keys, each FrameworkTest will have a member for each key
            tests.append(FrameworkTest(test_name, directory, benchmarker, sortedRunTests, test_keys))

    return tests
##############################################################
# End parse_config
##############################################################