# benchmarker.py
  1. from setup.linux.installer import Installer
  2. from setup.linux import setup_util
  3. from benchmark import framework_test
  4. from benchmark.test_types import *
  5. from utils import header
  6. from utils import gather_tests
  7. from utils import gather_frameworks
  8. from utils import verify_database_connections
  9. import os
  10. import json
  11. import subprocess
  12. import traceback
  13. import time
  14. import pprint
  15. import csv
  16. import sys
  17. import logging
  18. import socket
  19. import threading
  20. import textwrap
  21. from pprint import pprint
  22. from multiprocessing import Process
  23. from datetime import datetime
  24. # Cross-platform colored text
  25. from colorama import Fore, Back, Style
  26. # Text-based progress indicators
  27. import progressbar
  28. class Benchmarker:
  29. ##########################################################################################
  30. # Public methods
  31. ##########################################################################################
  32. ############################################################
  33. # Prints all the available tests
  34. ############################################################
  35. def run_list_tests(self):
  36. all_tests = self.__gather_tests
  37. for test in all_tests:
  38. print test.name
  39. self.__finish()
  40. ############################################################
  41. # End run_list_tests
  42. ############################################################
  43. ############################################################
  44. # Prints the metadata for all the available tests
  45. ############################################################
  46. def run_list_test_metadata(self):
  47. all_tests = self.__gather_tests
  48. all_tests_json = json.dumps(map(lambda test: {
  49. "name": test.name,
  50. "approach": test.approach,
  51. "classification": test.classification,
  52. "database": test.database,
  53. "framework": test.framework,
  54. "language": test.language,
  55. "orm": test.orm,
  56. "platform": test.platform,
  57. "webserver": test.webserver,
  58. "os": test.os,
  59. "database_os": test.database_os,
  60. "display_name": test.display_name,
  61. "notes": test.notes,
  62. "versus": test.versus
  63. }, all_tests))
  64. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  65. f.write(all_tests_json)
  66. self.__finish()
  67. ############################################################
  68. # End run_list_test_metadata
  69. ############################################################
  70. ############################################################
  71. # parse_timestamp
  72. # Re-parses the raw data for a given timestamp
  73. ############################################################
  74. def parse_timestamp(self):
  75. all_tests = self.__gather_tests
  76. for test in all_tests:
  77. test.parse_all()
  78. self.__parse_results(all_tests)
  79. self.__finish()
  80. ############################################################
  81. # End parse_timestamp
  82. ############################################################
  83. ############################################################
  84. # Run the tests:
  85. # This process involves setting up the client/server machines
  86. # with any necessary change. Then going through each test,
  87. # running their setup script, verifying the URLs, and
  88. # running benchmarks against them.
  89. ############################################################
  90. def run(self):
  91. ##########################
  92. # Get a list of all known
  93. # tests that we can run.
  94. ##########################
  95. all_tests = self.__gather_tests
  96. ##########################
  97. # Setup client/server
  98. ##########################
  99. print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
  100. self.__setup_server()
  101. self.__setup_database()
  102. self.__setup_client()
  103. ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
  104. #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
  105. # raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
  106. ##########################
  107. # Run tests
  108. ##########################
  109. print header("Running Tests...", top='=', bottom='=')
  110. result = self.__run_tests(all_tests)
  111. ##########################
  112. # Parse results
  113. ##########################
  114. if self.mode == "benchmark":
  115. print header("Parsing Results ...", top='=', bottom='=')
  116. self.__parse_results(all_tests)
  117. self.__finish()
  118. return result
  119. ############################################################
  120. # End run
  121. ############################################################
  122. ############################################################
  123. # database_sftp_string(batch_file)
  124. # generates a fully qualified URL for sftp to database
  125. ############################################################
  126. def database_sftp_string(self, batch_file):
  127. sftp_string = "sftp -oStrictHostKeyChecking=no "
  128. if batch_file != None: sftp_string += " -b " + batch_file + " "
  129. if self.database_identity_file != None:
  130. sftp_string += " -i " + self.database_identity_file + " "
  131. return sftp_string + self.database_user + "@" + self.database_host
  132. ############################################################
  133. # End database_sftp_string
  134. ############################################################
  135. ############################################################
  136. # client_sftp_string(batch_file)
  137. # generates a fully qualified URL for sftp to client
  138. ############################################################
  139. def client_sftp_string(self, batch_file):
  140. sftp_string = "sftp -oStrictHostKeyChecking=no "
  141. if batch_file != None: sftp_string += " -b " + batch_file + " "
  142. if self.client_identity_file != None:
  143. sftp_string += " -i " + self.client_identity_file + " "
  144. return sftp_string + self.client_user + "@" + self.client_host
  145. ############################################################
  146. # End client_sftp_string
  147. ############################################################
  148. ############################################################
  149. # generate_url(url, port)
  150. # generates a fully qualified URL for accessing a test url
  151. ############################################################
  152. def generate_url(self, url, port):
  153. return self.server_host + ":" + str(port) + url
  154. ############################################################
  155. # End generate_url
  156. ############################################################
  157. ############################################################
  158. # get_output_file(test_name, test_type)
  159. # returns the output file name for this test_name and
  160. # test_type timestamp/test_type/test_name/raw
  161. ############################################################
  162. def get_output_file(self, test_name, test_type):
  163. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  164. ############################################################
  165. # End get_output_file
  166. ############################################################
  167. ############################################################
  168. # output_file(test_name, test_type)
  169. # returns the output file for this test_name and test_type
  170. # timestamp/test_type/test_name/raw
  171. ############################################################
  172. def output_file(self, test_name, test_type):
  173. path = self.get_output_file(test_name, test_type)
  174. try:
  175. os.makedirs(os.path.dirname(path))
  176. except OSError:
  177. pass
  178. return path
  179. ############################################################
  180. # End output_file
  181. ############################################################
  182. ############################################################
  183. # get_stats_file(test_name, test_type)
  184. # returns the stats file name for this test_name and
  185. # test_type timestamp/test_type/test_name/raw
  186. ############################################################
  187. def get_stats_file(self, test_name, test_type):
  188. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  189. ############################################################
  190. # End get_stats_file
  191. ############################################################
  192. ############################################################
  193. # stats_file(test_name, test_type)
  194. # returns the stats file for this test_name and test_type
  195. # timestamp/test_type/test_name/raw
  196. ############################################################
  197. def stats_file(self, test_name, test_type):
  198. path = self.get_stats_file(test_name, test_type)
  199. try:
  200. os.makedirs(os.path.dirname(path))
  201. except OSError:
  202. pass
  203. return path
  204. ############################################################
  205. # End stats_file
  206. ############################################################
  207. ############################################################
  208. # full_results_directory
  209. ############################################################
  210. def full_results_directory(self):
  211. path = os.path.join(self.result_directory, self.timestamp)
  212. try:
  213. os.makedirs(path)
  214. except OSError:
  215. pass
  216. return path
  217. ############################################################
  218. # End full_results_directory
  219. ############################################################
  220. ############################################################
  221. # Latest intermediate results dirctory
  222. ############################################################
  223. def latest_results_directory(self):
  224. path = os.path.join(self.result_directory,"latest")
  225. try:
  226. os.makedirs(path)
  227. except OSError:
  228. pass
  229. return path
  230. ############################################################
  231. # report_verify_results
  232. # Used by FrameworkTest to add verification details to our results
  233. #
  234. # TODO: Technically this is an IPC violation - we are accessing
  235. # the parent process' memory from the child process
  236. ############################################################
  237. def report_verify_results(self, framework, test, result):
  238. if framework.name not in self.results['verify'].keys():
  239. self.results['verify'][framework.name] = dict()
  240. self.results['verify'][framework.name][test] = result
  241. ############################################################
  242. # report_benchmark_results
  243. # Used by FrameworkTest to add benchmark data to this
  244. #
  245. # TODO: Technically this is an IPC violation - we are accessing
  246. # the parent process' memory from the child process
  247. ############################################################
  248. def report_benchmark_results(self, framework, test, results):
  249. if test not in self.results['rawData'].keys():
  250. self.results['rawData'][test] = dict()
  251. # If results has a size from the parse, then it succeeded.
  252. if results:
  253. self.results['rawData'][test][framework.name] = results
  254. # This may already be set for single-tests
  255. if framework.name not in self.results['succeeded'][test]:
  256. self.results['succeeded'][test].append(framework.name)
  257. else:
  258. # This may already be set for single-tests
  259. if framework.name not in self.results['failed'][test]:
  260. self.results['failed'][test].append(framework.name)
  261. ############################################################
  262. # End report_results
  263. ############################################################
  264. ##########################################################################################
  265. # Private methods
  266. ##########################################################################################
  267. ############################################################
  268. # Gathers all the tests
  269. ############################################################
  270. @property
  271. def __gather_tests(self):
  272. tests = gather_tests(include=self.test,
  273. exclude=self.exclude,
  274. benchmarker=self)
  275. # If the tests have been interrupted somehow, then we want to resume them where we left
  276. # off, rather than starting from the beginning
  277. if os.path.isfile('current_benchmark.txt'):
  278. with open('current_benchmark.txt', 'r') as interrupted_benchmark:
  279. interrupt_bench = interrupted_benchmark.read().strip()
  280. for index, atest in enumerate(tests):
  281. if atest.name == interrupt_bench:
  282. tests = tests[index:]
  283. break
  284. return tests
  285. ############################################################
  286. # End __gather_tests
  287. ############################################################
  288. ############################################################
  289. # Makes any necessary changes to the server that should be
  290. # made before running the tests. This involves setting kernal
  291. # settings to allow for more connections, or more file
  292. # descriptiors
  293. #
  294. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  295. ############################################################
  296. def __setup_server(self):
  297. try:
  298. if os.name == 'nt':
  299. return True
  300. subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  301. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  302. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  303. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  304. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  305. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  306. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  307. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  308. except subprocess.CalledProcessError:
  309. return False
  310. ############################################################
  311. # End __setup_server
  312. ############################################################
  313. ############################################################
  314. # Makes any necessary changes to the database machine that
  315. # should be made before running the tests. Is very similar
  316. # to the server setup, but may also include database specific
  317. # changes.
  318. ############################################################
def __setup_database(self):
    """Tune kernel settings on the database machine before tests run.

    Opens an ssh session (command line in self.database_ssh_string) and
    pipes a fixed tuning script to its stdin; communicate() blocks until
    the remote shell finishes. Very similar to the local server setup,
    plus database-specific shared-memory/semaphore limits.
    """
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo sysctl -w kernel.sched_autogroup_enabled=0
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
      sudo sysctl -w kernel.sem="250 32000 256 256"
      echo "Printing kernel configuration:" && sudo sysctl -a
    """)
    # Explanations:
    # net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
    # ulimit -n: http://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/
    # net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
    # kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
    # For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
  339. ############################################################
  340. # End __setup_database
  341. ############################################################
  342. ############################################################
  343. # Makes any necessary changes to the client machine that
  344. # should be made before running the tests. Is very similar
  345. # to the server setup, but may also include client specific
  346. # changes.
  347. ############################################################
def __setup_client(self):
    """Tune kernel settings on the client (load generator) machine.

    Same mechanism as __setup_database: pipe a fixed tuning script into
    an ssh session built from self.client_ssh_string and wait for it.
    """
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  359. ############################################################
  360. # End __setup_client
  361. ############################################################
  362. ############################################################
  363. # __run_tests
  364. #
  365. # 2013-10-02 ASB Calls each test passed in tests to
  366. # __run_test in a separate process. Each
  367. # test is given a set amount of time and if
  368. # kills the child process (and subsequently
  369. # all of its child processes). Uses
  370. # multiprocessing module.
  371. ############################################################
def __run_tests(self, tests):
    """Run each test in *tests*; return 1 if any failed, else 0.

    2013-10-02 ASB  Calls each test passed in tests to __run_test in a
    separate process. Each test is given a set amount of time and if it
    exceeds it, the child process (and subsequently all of its child
    processes) is killed. Uses the multiprocessing module.

    On Windows the tests run sequentially in this process instead,
    because the Process/timeout machinery below is not used there.
    """
    if len(tests) == 0:
        return 0

    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s",__name__)

    error_happened = False
    if self.os.lower() == 'windows':
        logging.debug("Executing __run_tests on Windows")
        for test in tests:
            # Record the in-flight test so an interrupted run can resume here.
            with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                benchmark_resume_file.write(test.name)
            if self.__run_test(test) != 0:
                error_happened = True
    else:
        logging.debug("Executing __run_tests on Linux")

        # Setup a nice progressbar and ETA indicator
        widgets = [self.mode, ': ',  progressbar.Percentage(),
                   ' ', progressbar.Bar(),
                   ' Rough ', progressbar.ETA()]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
        pbar_test = 0

        # These features do not work on Windows
        for test in tests:
            pbar.update(pbar_test)
            pbar_test = pbar_test + 1
            if __name__ == 'benchmark.benchmarker':
                print header("Running Test: %s" % test.name)
            # Record the in-flight test so an interrupted run can resume here.
            with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                benchmark_resume_file.write(test.name)
            # Run the test in a child process, bounded by the configured timeout.
            test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
            test_process.start()
            test_process.join(self.run_test_timeout_seconds)
            self.__load_results()  # Load intermediate result from child process
            if(test_process.is_alive()):
                # join() timed out: the test ran too long. Kill it and record why.
                logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                test_process.terminate()
                test_process.join()
            # A terminated process has a non-zero exitcode, so timeouts count as errors too.
            if test_process.exitcode != 0:
                error_happened = True
        pbar.finish()

    # The run completed; remove the resume marker.
    if os.path.isfile('current_benchmark.txt'):
        os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")

    if error_happened:
        return 1
    return 0
  419. ############################################################
  420. # End __run_tests
  421. ############################################################
  422. ############################################################
  423. # __run_test
  424. # 2013-10-02 ASB Previously __run_tests. This code now only
  425. # processes a single test.
  426. #
  427. # Ensures that the system has all necessary software to run
  428. # the tests. This does not include that software for the individual
  429. # test, but covers software such as curl and weighttp that
  430. # are needed.
  431. ############################################################
def __run_test(self, test):
    """Run a single test end-to-end: start, verify, benchmark, stop, save.

    2013-10-02 ASB  Previously __run_tests. This code now only processes
    a single test.

    Runs in a child process on Linux (see __run_tests), so "returning" an
    error is done via sys.exit(code); on Windows it runs in-process and
    returns the code instead — exit_with_code below hides the difference.
    All progress is logged to per-test out.txt/err.txt files.
    """
    # Used to capture return values
    def exit_with_code(code):
        if self.os.lower() == 'windows':
            return code
        else:
            sys.exit(code)

    # NOTE(review): self.latest_results_directory is used here without
    # calling it — presumably __init__ rebinds it to a string (or it is a
    # property elsewhere); confirm against the class initializer.
    logDir = os.path.join(self.latest_results_directory, 'logs', test.name.lower())
    try:
        os.makedirs(logDir)
    except Exception:
        pass  # log directory already exists
    with open(os.path.join(logDir, 'out.txt'), 'w') as out, \
         open(os.path.join(logDir, 'err.txt'), 'w') as err:

        # Skip tests whose declared OS/database OS does not match this machine.
        if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
            out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
            return exit_with_code(0)

        # If the test is in the excludes list, we skip it
        if self.exclude != None and test.name in self.exclude:
            out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
            return exit_with_code(0)

        # Debug breadcrumbs for diagnosing skip decisions from the logs.
        out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
        out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
        out.write("test.name: {name}\n".format(name=str(test.name)))
        out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
        # Skip tests already completed in previously-saved (resumed) results.
        if self.results['frameworks'] != None and test.name in self.results['completed']:
            out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
            return exit_with_code(1)
        out.flush()

        out.write(header("Beginning %s" % test.name, top='='))
        out.flush()

        ##########################
        # Start this test
        ##########################
        out.write(header("Starting %s" % test.name))
        out.flush()
        try:
            # Restart every database so each test starts from a clean state.
            if test.requires_database():
                p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                p.communicate("""
                  sudo restart mysql
                  sudo restart mongod
                  sudo service redis-server restart
                  sudo service postgresql restart
                  sudo service cassandra restart
                """)
                time.sleep(10)

                # Confirm each database is accepting connections before starting.
                st = verify_database_connections([
                    ("mysql", self.database_host, 3306),
                    ("mongodb", self.database_host, 27017),
                    ("redis", self.database_host, 6379),
                    ("postgresql", self.database_host, 5432),
                    ("cassandra", self.database_host, 9160)
                ])
                print "database connection test results:\n" + "\n".join(st[1])

            # The test's port must be free before we can start the framework.
            if self.__is_port_bound(test.port):
                # This can happen sometimes - let's try again
                self.__stop_test(out, err)
                out.flush()
                err.flush()
                time.sleep(15)
                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    err.flush()
                    print "Error: Unable to recover port, cannot start test"
                    return exit_with_code(1)

            result = test.start(out, err)
            if result != 0:
                self.__stop_test(out, err)
                time.sleep(5)
                err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                err.flush()
                self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                return exit_with_code(1)

            # Give the framework time to finish booting before hitting its URLs.
            logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
            time.sleep(self.sleep)

            ##########################
            # Verify URLs
            ##########################
            logging.info("Verifying framework URLs")
            passed_verify = test.verify_urls(out, err)
            out.flush()
            err.flush()

            ##########################
            # Benchmark this test
            ##########################
            if self.mode == "benchmark":
                logging.info("Benchmarking")
                out.write(header("Benchmarking %s" % test.name))
                out.flush()
                test.benchmark(out, err)
                out.flush()
                err.flush()

            ##########################
            # Stop this test
            ##########################
            out.write(header("Stopping %s" % test.name))
            out.flush()
            self.__stop_test(out, err)
            out.flush()
            err.flush()
            time.sleep(15)

            # The port must be released, or the next test cannot start.
            if self.__is_port_bound(test.port):
                # This can happen sometimes - let's try again
                self.__stop_test(out, err)
                out.flush()
                err.flush()
                time.sleep(15)
                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                    err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)

            out.write(header("Stopped %s" % test.name))
            out.flush()
            time.sleep(5)

            ##########################################################
            # Save results thus far into toolset/benchmark/latest.json
            ##########################################################
            out.write(header("Saving results through %s" % test.name))
            out.flush()
            self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))

            if self.mode == "verify" and not passed_verify:
                print "Failed verify!"
                return exit_with_code(1)
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            # Something below us blew up; record it, try to stop the test, bail.
            self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
            err.write(header("Subprocess Error %s" % test.name))
            traceback.print_exc(file=err)
            err.flush()
            try:
                self.__stop_test(out, err)
            except (subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                traceback.print_exc(file=err)
                err.flush()
            out.close()
            err.close()
            return exit_with_code(1)
        # TODO - subprocess should not catch this exception!
        # Parent process should catch it and cleanup/exit
        except (KeyboardInterrupt) as e:
            self.__stop_test(out, err)
            out.write(header("Cleaning up..."))
            out.flush()
            self.__finish()
            sys.exit(1)

        out.close()
        err.close()
        return exit_with_code(0)
  586. ############################################################
  587. # End __run_tests
  588. ############################################################
  589. ############################################################
  590. # __stop_test(benchmarker)
  591. # Stops all running tests
  592. ############################################################
  593. def __stop_test(self, out, err):
  594. try:
  595. subprocess.check_call('sudo killall -s 9 -u %s' % self.runner_user, shell=True, stderr=err, stdout=out)
  596. retcode = 0
  597. except Exception:
  598. retcode = 1
  599. return retcode
  600. ############################################################
  601. # End __stop_test
  602. ############################################################
  603. ############################################################
  604. # __is_port_bound
  605. # Check if the requested port is available. If it
  606. # isn't available, then a previous test probably didn't
  607. # shutdown properly.
  608. ############################################################
  609. def __is_port_bound(self, port):
  610. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  611. try:
  612. # Try to bind to all IP addresses, this port
  613. s.bind(("", port))
  614. # If we get here, we were able to bind successfully,
  615. # which means the port is free.
  616. except Exception:
  617. # If we get an exception, it might be because the port is still bound
  618. # which would be bad, or maybe it is a privileged port (<1024) and we
  619. # are not running as root, or maybe the server is gone, but sockets are
  620. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  621. # connect.
  622. try:
  623. s.connect(("127.0.0.1", port))
  624. # If we get here, we were able to connect to something, which means
  625. # that the port is still bound.
  626. return True
  627. except Exception:
  628. # An exception means that we couldn't connect, so a server probably
  629. # isn't still running on the port.
  630. pass
  631. finally:
  632. s.close()
  633. return False
  634. ############################################################
  635. # End __is_port_bound
  636. ############################################################
  637. ############################################################
  638. # __parse_results
  639. # Ensures that the system has all necessary software to run
  640. # the tests. This does not include that software for the individual
  641. # test, but covers software such as curl and weighttp that
  642. # are needed.
  643. ############################################################
  644. def __parse_results(self, tests):
  645. # Run the method to get the commmit count of each framework.
  646. self.__count_commits()
  647. # Call the method which counts the sloc for each framework
  648. self.__count_sloc()
  649. # Time to create parsed files
  650. # Aggregate JSON file
  651. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  652. f.write(json.dumps(self.results, indent=2))
  653. ############################################################
  654. # End __parse_results
  655. ############################################################
  656. #############################################################
  657. # __count_sloc
  658. #############################################################
  659. def __count_sloc(self):
  660. frameworks = gather_frameworks(include=self.test,
  661. exclude=self.exclude, benchmarker=self)
  662. jsonResult = {}
  663. for framework, testlist in frameworks.iteritems():
  664. if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
  665. logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
  666. continue
  667. # Unfortunately the source_code files use lines like
  668. # ./cpoll_cppsp/www/fortune_old instead of
  669. # ./www/fortune_old
  670. # so we have to back our working dir up one level
  671. wd = os.path.dirname(testlist[0].directory)
  672. try:
  673. command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
  674. # Find the last instance of the word 'code' in the yaml output. This should
  675. # be the line count for the sum of all listed files or just the line count
  676. # for the last file in the case where there's only one file listed.
  677. command = command + "| grep code | tail -1 | cut -d: -f 2"
  678. logging.debug("Running \"%s\" (cwd=%s)", command, wd)
  679. lineCount = subprocess.check_output(command, cwd=wd, shell=True)
  680. jsonResult[framework] = int(lineCount)
  681. except subprocess.CalledProcessError:
  682. continue
  683. except ValueError as ve:
  684. logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
  685. self.results['rawData']['slocCounts'] = jsonResult
  686. ############################################################
  687. # End __count_sloc
  688. ############################################################
  689. ############################################################
  690. # __count_commits
  691. #
  692. ############################################################
  693. def __count_commits(self):
  694. frameworks = gather_frameworks(include=self.test,
  695. exclude=self.exclude, benchmarker=self)
  696. def count_commit(directory, jsonResult):
  697. command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
  698. try:
  699. commitCount = subprocess.check_output(command, shell=True)
  700. jsonResult[framework] = int(commitCount)
  701. except subprocess.CalledProcessError:
  702. pass
  703. # Because git can be slow when run in large batches, this
  704. # calls git up to 4 times in parallel. Normal improvement is ~3-4x
  705. # in my trials, or ~100 seconds down to ~25
  706. # This is safe to parallelize as long as each thread only
  707. # accesses one key in the dictionary
  708. threads = []
  709. jsonResult = {}
  710. t1 = datetime.now()
  711. for framework, testlist in frameworks.iteritems():
  712. directory = testlist[0].directory
  713. t = threading.Thread(target=count_commit, args=(directory,jsonResult))
  714. t.start()
  715. threads.append(t)
  716. # Git has internal locks, full parallel will just cause contention
  717. # and slowness, so we rate-limit a bit
  718. if len(threads) >= 4:
  719. threads[0].join()
  720. threads.remove(threads[0])
  721. # Wait for remaining threads
  722. for t in threads:
  723. t.join()
  724. t2 = datetime.now()
  725. # print "Took %s seconds " % (t2 - t1).seconds
  726. self.results['rawData']['commitCounts'] = jsonResult
  727. self.commits = jsonResult
  728. ############################################################
  729. # End __count_commits
  730. ############################################################
  731. ############################################################
  732. # __write_intermediate_results
  733. ############################################################
  734. def __write_intermediate_results(self,test_name,status_message):
  735. try:
  736. self.results["completed"][test_name] = status_message
  737. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  738. f.write(json.dumps(self.results, indent=2))
  739. except (IOError):
  740. logging.error("Error writing results.json")
  741. ############################################################
  742. # End __write_intermediate_results
  743. ############################################################
  744. def __load_results(self):
  745. try:
  746. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  747. self.results = json.load(f)
  748. except (ValueError, IOError):
  749. pass
  750. ############################################################
  751. # __finish
  752. ############################################################
  753. def __finish(self):
  754. if not self.list_tests and not self.list_test_metadata and not self.parse:
  755. tests = self.__gather_tests
  756. # Normally you don't have to use Fore.BLUE before each line, but
  757. # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
  758. # or stream flush, so we have to ensure that the color code is printed repeatedly
  759. prefix = Fore.CYAN
  760. for line in header("Verification Summary", top='=', bottom='').split('\n'):
  761. print prefix + line
  762. for test in tests:
  763. print prefix + "| Test: %s" % test.name
  764. if test.name in self.results['verify'].keys():
  765. for test_type, result in self.results['verify'][test.name].iteritems():
  766. if result.upper() == "PASS":
  767. color = Fore.GREEN
  768. elif result.upper() == "WARN":
  769. color = Fore.YELLOW
  770. else:
  771. color = Fore.RED
  772. print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
  773. else:
  774. print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
  775. print prefix + header('', top='', bottom='=') + Style.RESET_ALL
  776. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  777. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  778. ############################################################
  779. # End __finish
  780. ############################################################
  781. ##########################################################################################
  782. # Constructor
  783. ##########################################################################################
  784. ############################################################
  785. # Initialize the benchmarker. The args are the arguments
  786. # parsed via argparser.
  787. ############################################################
  788. def __init__(self, args):
  789. # Map type strings to their objects
  790. types = dict()
  791. types['json'] = JsonTestType()
  792. types['db'] = DBTestType()
  793. types['query'] = QueryTestType()
  794. types['fortune'] = FortuneTestType()
  795. types['update'] = UpdateTestType()
  796. types['plaintext'] = PlaintextTestType()
  797. # Turn type into a map instead of a string
  798. if args['type'] == 'all':
  799. args['types'] = types
  800. else:
  801. args['types'] = { args['type'] : types[args['type']] }
  802. del args['type']
  803. args['max_threads'] = args['threads']
  804. args['max_concurrency'] = max(args['concurrency_levels'])
  805. self.__dict__.update(args)
  806. # pprint(self.__dict__)
  807. self.start_time = time.time()
  808. self.run_test_timeout_seconds = 3600
  809. # setup logging
  810. logging.basicConfig(stream=sys.stderr, level=logging.INFO)
  811. # setup some additional variables
  812. if self.database_user == None: self.database_user = self.client_user
  813. if self.database_host == None: self.database_host = self.client_host
  814. if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
  815. # Remember root directory
  816. self.fwroot = setup_util.get_fwroot()
  817. # setup results and latest_results directories
  818. self.result_directory = os.path.join("results", self.name)
  819. self.latest_results_directory = self.latest_results_directory()
  820. if hasattr(self, 'parse') and self.parse != None:
  821. self.timestamp = self.parse
  822. else:
  823. self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
  824. # Load the latest data
  825. #self.latest = None
  826. #try:
  827. # with open('toolset/benchmark/latest.json', 'r') as f:
  828. # # Load json file into config object
  829. # self.latest = json.load(f)
  830. # logging.info("toolset/benchmark/latest.json loaded to self.latest")
  831. # logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
  832. #except IOError:
  833. # logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
  834. #
  835. #self.results = None
  836. #try:
  837. # if self.latest != None and self.name in self.latest.keys():
  838. # with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
  839. # # Load json file into config object
  840. # self.results = json.load(f)
  841. #except IOError:
  842. # pass
  843. self.results = None
  844. try:
  845. with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
  846. #Load json file into results object
  847. self.results = json.load(f)
  848. except IOError:
  849. logging.warn("results.json for test %s not found.",self.name)
  850. if self.results == None:
  851. self.results = dict()
  852. self.results['name'] = self.name
  853. self.results['concurrencyLevels'] = self.concurrency_levels
  854. self.results['queryIntervals'] = self.query_levels
  855. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  856. self.results['duration'] = self.duration
  857. self.results['rawData'] = dict()
  858. self.results['rawData']['json'] = dict()
  859. self.results['rawData']['db'] = dict()
  860. self.results['rawData']['query'] = dict()
  861. self.results['rawData']['fortune'] = dict()
  862. self.results['rawData']['update'] = dict()
  863. self.results['rawData']['plaintext'] = dict()
  864. self.results['completed'] = dict()
  865. self.results['succeeded'] = dict()
  866. self.results['succeeded']['json'] = []
  867. self.results['succeeded']['db'] = []
  868. self.results['succeeded']['query'] = []
  869. self.results['succeeded']['fortune'] = []
  870. self.results['succeeded']['update'] = []
  871. self.results['succeeded']['plaintext'] = []
  872. self.results['failed'] = dict()
  873. self.results['failed']['json'] = []
  874. self.results['failed']['db'] = []
  875. self.results['failed']['query'] = []
  876. self.results['failed']['fortune'] = []
  877. self.results['failed']['update'] = []
  878. self.results['failed']['plaintext'] = []
  879. self.results['verify'] = dict()
  880. else:
  881. #for x in self.__gather_tests():
  882. # if x.name not in self.results['frameworks']:
  883. # self.results['frameworks'] = self.results['frameworks'] + [x.name]
  884. # Always overwrite framework list
  885. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  886. # Setup the ssh command string
  887. self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
  888. self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
  889. if self.database_identity_file != None:
  890. self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
  891. if self.client_identity_file != None:
  892. self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
  893. if self.install is not None:
  894. install = Installer(self, self.install_strategy)
  895. install.install_software()
  896. ############################################################
  897. # End __init__
  898. ############################################################