benchmarker.py

from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks
from utils import verify_database_connections

import os
import stat
import json
import subprocess
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
import textwrap
from pprint import pprint

from multiprocessing import Process

from datetime import datetime

# Cross-platform colored text
from colorama import Fore, Back, Style

# Text-based progress indicators
import progressbar

class Benchmarker:

  ##########################################################################################
  # Public methods
  ##########################################################################################

  ############################################################
  # Prints all the available tests
  ############################################################
  def run_list_tests(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      print test.name

    self.__finish()
  ############################################################
  # End run_list_tests
  ############################################################

  ############################################################
  # Prints the metadata for all the available tests
  ############################################################
  def run_list_test_metadata(self):
    all_tests = self.__gather_tests

    all_tests_json = json.dumps(map(lambda test: {
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    }, all_tests))

    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)

    self.__finish()
  ############################################################
  # End run_list_test_metadata
  ############################################################

  ############################################################
  # parse_timestamp
  # Re-parses the raw data for a given timestamp
  ############################################################
  def parse_timestamp(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      test.parse_all()

    self.__parse_results(all_tests)

    self.__finish()
  ############################################################
  # End parse_timestamp
  ############################################################

  ############################################################
  # Run the tests:
  # This process involves setting up the client/server machines
  # with any necessary changes, then going through each test,
  # running its setup script, verifying the URLs, and running
  # benchmarks against them.
  ############################################################
  def run(self):
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests

    ##########################
    # Setup client/server
    ##########################
    print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    ##########################
    # Run tests
    ##########################
    print header("Running Tests...", top='=', bottom='=')
    result = self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
      print header("Parsing Results ...", top='=', bottom='=')
      self.__parse_results(all_tests)

    self.__finish()
    return result
  ############################################################
  # End run
  ############################################################

  ############################################################
  # database_sftp_string(batch_file)
  # generates a fully qualified sftp command string for the
  # database machine
  ############################################################
  def database_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file != None: sftp_string += " -b " + batch_file + " "

    if self.database_identity_file != None:
      sftp_string += " -i " + self.database_identity_file + " "

    return sftp_string + self.database_user + "@" + self.database_host
  ############################################################
  # End database_sftp_string
  ############################################################

  ############################################################
  # client_sftp_string(batch_file)
  # generates a fully qualified sftp command string for the
  # client machine
  ############################################################
  def client_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file != None: sftp_string += " -b " + batch_file + " "

    if self.client_identity_file != None:
      sftp_string += " -i " + self.client_identity_file + " "

    return sftp_string + self.client_user + "@" + self.client_host
  ############################################################
  # End client_sftp_string
  ############################################################
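
  # For illustration (hypothetical values): client_sftp_string("batch.txt")
  # with client_identity_file "~/.ssh/client_key", client_user "tfb", and
  # client_host "10.0.0.2" produces roughly:
  #   sftp -oStrictHostKeyChecking=no  -b batch.txt  -i ~/.ssh/client_key tfb@10.0.0.2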

  ############################################################
  # generate_url(url, port)
  # generates a fully qualified URL for accessing a test url
  ############################################################
  def generate_url(self, url, port):
    return self.server_host + ":" + str(port) + url
  ############################################################
  # End generate_url
  ############################################################
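
  # For example (hypothetical values): with server_host "10.0.0.1",
  # generate_url("/json", 8080) returns "10.0.0.1:8080/json".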

  ############################################################
  # get_output_file(test_name, test_type)
  # returns the output file name for this test_name and
  # test_type timestamp/test_type/test_name/raw
  ############################################################
  def get_output_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  ############################################################
  # End get_output_file
  ############################################################
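
  # e.g. (illustrative values): with result_directory "results/my-run" and
  # timestamp "20150101120000", get_output_file("gemini", "json") returns
  # results/my-run/20150101120000/json/gemini/raw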

  ############################################################
  # output_file(test_name, test_type)
  # returns the output file for this test_name and test_type
  # timestamp/test_type/test_name/raw
  ############################################################
  def output_file(self, test_name, test_type):
    path = self.get_output_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End output_file
  ############################################################

  ############################################################
  # get_stats_file(test_name, test_type)
  # returns the stats file name for this test_name and
  # test_type timestamp/test_type/test_name/stats
  ############################################################
  def get_stats_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  ############################################################
  # End get_stats_file
  ############################################################

  ############################################################
  # stats_file(test_name, test_type)
  # returns the stats file for this test_name and test_type
  # timestamp/test_type/test_name/stats
  ############################################################
  def stats_file(self, test_name, test_type):
    path = self.get_stats_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End stats_file
  ############################################################

  ############################################################
  # full_results_directory
  ############################################################
  def full_results_directory(self):
    path = os.path.join(self.result_directory, self.timestamp)
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path
  ############################################################
  # End full_results_directory
  ############################################################

  ############################################################
  # Latest intermediate results directory
  ############################################################
  def latest_results_directory(self):
    path = os.path.join(self.result_directory, "latest")
    try:
      os.makedirs(path)
    except OSError:
      pass

    # Give testrunner permission to write into the results directory
    # so the LOGDIR param always works in setup.sh.
    # While 775 would be preferable, we would have to ensure that
    # testrunner is in the group of the current user.
    if not self.os.lower() == 'windows':
      mode777 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                 stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
      os.chmod(path, mode777)
    return path

  ############################################################
  # report_verify_results
  # Used by FrameworkTest to add verification details to our results
  #
  # TODO: Technically this is an IPC violation - we are accessing
  # the parent process' memory from the child process
  ############################################################
  def report_verify_results(self, framework, test, result):
    if framework.name not in self.results['verify'].keys():
      self.results['verify'][framework.name] = dict()
    self.results['verify'][framework.name][test] = result

  ############################################################
  # report_benchmark_results
  # Used by FrameworkTest to add benchmark data to our results
  #
  # TODO: Technically this is an IPC violation - we are accessing
  # the parent process' memory from the child process
  ############################################################
  def report_benchmark_results(self, framework, test, results):
    if test not in self.results['rawData'].keys():
      self.results['rawData'][test] = dict()

    # If results has a size from the parse, then it succeeded.
    if results:
      self.results['rawData'][test][framework.name] = results

      # This may already be set for single-tests
      if framework.name not in self.results['succeeded'][test]:
        self.results['succeeded'][test].append(framework.name)
    else:
      # This may already be set for single-tests
      if framework.name not in self.results['failed'][test]:
        self.results['failed'][test].append(framework.name)
  ############################################################
  # End report_benchmark_results
  ############################################################

  ##########################################################################################
  # Private methods
  ##########################################################################################

  ############################################################
  # Gathers all the tests
  ############################################################
  @property
  def __gather_tests(self):
    tests = gather_tests(include=self.test,
                         exclude=self.exclude,
                         benchmarker=self)

    # If the tests have been interrupted somehow, then we want to resume them where we left
    # off, rather than starting from the beginning
    if os.path.isfile('current_benchmark.txt'):
      with open('current_benchmark.txt', 'r') as interrupted_benchmark:
        interrupt_bench = interrupted_benchmark.read().strip()
        for index, atest in enumerate(tests):
          if atest.name == interrupt_bench:
            tests = tests[index:]
            break
    return tests
  ############################################################
  # End __gather_tests
  ############################################################
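
  # Resume marker example (illustrative test name): if current_benchmark.txt
  # contains "gemini", the returned list is trimmed to start at that test.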

  ############################################################
  # Makes any necessary changes to the server that should be
  # made before running the tests. This involves setting kernel
  # settings to allow for more connections, or more file
  # descriptors
  #
  # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  ############################################################
  def __setup_server(self):
    try:
      if os.name == 'nt':
        return True
      subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
      subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
      subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
      subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
    except subprocess.CalledProcessError:
      return False
  ############################################################
  # End __setup_server
  ############################################################

  ############################################################
  # Makes any necessary changes to the database machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include database-specific
  # changes.
  ############################################################
  def __setup_database(self):
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo sysctl -w kernel.sched_autogroup_enabled=0
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
      sudo sysctl -w kernel.sem="250 32000 256 512"
    """)
    # TODO - print kernel configuration to file
    # echo "Printing kernel configuration:" && sudo sysctl -a

    # Explanations:
    # net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
    # ulimit -n: http://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/
    # net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
    # kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
    # For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
  ############################################################
  # End __setup_database
  ############################################################

  ############################################################
  # Makes any necessary changes to the client machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include client-specific
  # changes.
  ############################################################
  def __setup_client(self):
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_client
  ############################################################

  ############################################################
  # __run_tests
  #
  # 2013-10-02 ASB  Calls each test passed in tests to
  #                 __run_test in a separate process. Each
  #                 test is given a set amount of time, and if
  #                 it runs longer the child process (and
  #                 subsequently all of its child processes)
  #                 is killed. Uses the multiprocessing module.
  ############################################################
  def __run_tests(self, tests):
    if len(tests) == 0:
      return 0

    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s", __name__)

    error_happened = False
    if self.os.lower() == 'windows':
      logging.debug("Executing __run_tests on Windows")
      for test in tests:
        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        if self.__run_test(test) != 0:
          error_happened = True
    else:
      logging.debug("Executing __run_tests on Linux")

      # Setup a nice progressbar and ETA indicator
      widgets = [self.mode, ': ', progressbar.Percentage(),
                 ' ', progressbar.Bar(),
                 ' Rough ', progressbar.ETA()]
      pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
      pbar_test = 0

      # These features do not work on Windows
      for test in tests:
        pbar.update(pbar_test)
        pbar_test = pbar_test + 1
        if __name__ == 'benchmark.benchmarker':
          print header("Running Test: %s" % test.name)

        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
        test_process.start()
        test_process.join(self.run_test_timeout_seconds)
        self.__load_results()  # Load intermediate result from child process
        if(test_process.is_alive()):
          logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
          self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
          test_process.terminate()
          test_process.join()
        if test_process.exitcode != 0:
          error_happened = True
      pbar.finish()

    if os.path.isfile('current_benchmark.txt'):
      os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")

    if error_happened:
      return 1
    return 0
  ############################################################
  # End __run_tests
  ############################################################

  ############################################################
  # __run_test
  # 2013-10-02 ASB  Previously __run_tests. This code now only
  #                 processes a single test.
  #
  # Runs a single test in a child process: restarts the
  # databases if needed, starts the framework, verifies its
  # URLs, optionally benchmarks it, stops it, and records the
  # intermediate results.
  ############################################################
  def __run_test(self, test):

    # Used to capture return values
    def exit_with_code(code):
      if self.os.lower() == 'windows':
        return code
      else:
        sys.exit(code)

    logDir = os.path.join(self.latest_results_directory, 'logs', test.name.lower())
    try:
      os.makedirs(logDir)
    except Exception:
      pass
    with open(os.path.join(logDir, 'out.txt'), 'w') as out, \
         open(os.path.join(logDir, 'err.txt'), 'w') as err:
      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
        out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
        return exit_with_code(0)

      # If the test is in the excludes list, we skip it
      if self.exclude != None and test.name in self.exclude:
        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
        return exit_with_code(0)

      out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(), dbos=test.database_os.lower()))
      out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
      out.write("test.name: {name}\n".format(name=str(test.name)))
      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
      if self.results['frameworks'] != None and test.name in self.results['completed']:
        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
        print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
        return exit_with_code(1)
      out.flush()

      out.write(header("Beginning %s" % test.name, top='='))
      out.flush()

      ##########################
      # Start this test
      ##########################
      out.write(header("Starting %s" % test.name))
      out.flush()
      try:
        if test.requires_database():
          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
          p.communicate("""
            sudo restart mysql
            sudo restart mongod
            sudo service redis-server restart
            sudo service postgresql restart
            sudo service cassandra restart
            /opt/elasticsearch/elasticsearch restart
          """)
          time.sleep(10)

          st = verify_database_connections([
            ("mysql", self.database_host, 3306),
            ("mongodb", self.database_host, 27017),
            ("redis", self.database_host, 6379),
            ("postgresql", self.database_host, 5432),
            ("cassandra", self.database_host, 9160),
            ("elasticsearch", self.database_host, 9200)
          ])
          print "database connection test results:\n" + "\n".join(st[1])

        if self.__is_port_bound(test.port):
          # This can happen sometimes - let's try again
          self.__stop_test(out, err)
          out.flush()
          err.flush()
          time.sleep(15)
          if self.__is_port_bound(test.port):
            # We gave it our all
            self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
            err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
            err.flush()
            print "Error: Unable to recover port, cannot start test"
            return exit_with_code(1)

        result = test.start(out, err)
        if result != 0:
          self.__stop_test(out, err)
          time.sleep(5)
          err.write("ERROR: Problem starting {name}\n".format(name=test.name))
          err.flush()
          self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
          return exit_with_code(1)

        logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
        time.sleep(self.sleep)

        ##########################
        # Verify URLs
        ##########################
        logging.info("Verifying framework URLs")
        passed_verify = test.verify_urls(out, err)
        out.flush()
        err.flush()

        ##########################
        # Benchmark this test
        ##########################
        if self.mode == "benchmark":
          logging.info("Benchmarking")
          out.write(header("Benchmarking %s" % test.name))
          out.flush()
          test.benchmark(out, err)
          out.flush()
          err.flush()

        ##########################
        # Stop this test
        ##########################
        out.write(header("Stopping %s" % test.name))
        out.flush()
        self.__stop_test(out, err)
        out.flush()
        err.flush()
        time.sleep(15)

        if self.__is_port_bound(test.port):
          # This can happen sometimes - let's try again
          self.__stop_test(out, err)
          out.flush()
          err.flush()
          time.sleep(15)
          if self.__is_port_bound(test.port):
            # We gave it our all
            self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
            err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
            err.flush()
            return exit_with_code(1)

        out.write(header("Stopped %s" % test.name))
        out.flush()
        time.sleep(5)

        ##########################################################
        # Save results thus far into the latest results directory
        ##########################################################
        out.write(header("Saving results through %s" % test.name))
        out.flush()
        self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

        if self.mode == "verify" and not passed_verify:
          print "Failed verify!"
          return exit_with_code(1)
      except (OSError, IOError, subprocess.CalledProcessError) as e:
        self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
        err.write(header("Subprocess Error %s" % test.name))
        traceback.print_exc(file=err)
        err.flush()
        try:
          self.__stop_test(out, err)
        except (subprocess.CalledProcessError) as e:
          self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
          err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
          traceback.print_exc(file=err)
          err.flush()
        out.close()
        err.close()
        return exit_with_code(1)
      # TODO - subprocess should not catch this exception!
      # Parent process should catch it and cleanup/exit
      except (KeyboardInterrupt) as e:
        self.__stop_test(out, err)
        out.write(header("Cleaning up..."))
        out.flush()
        self.__finish()
        sys.exit(1)

      out.close()
      err.close()
      return exit_with_code(0)
  ############################################################
  # End __run_test
  ############################################################

  ############################################################
  # __stop_test(benchmarker)
  # Stops all running tests
  ############################################################
  def __stop_test(self, out, err):
    try:
      subprocess.check_call('sudo killall -s 9 -u %s' % self.runner_user, shell=True, stderr=err, stdout=out)
      retcode = 0
    except Exception:
      retcode = 1
    return retcode
  ############################################################
  # End __stop_test
  ############################################################

  def is_port_bound(self, port):
    return self.__is_port_bound(port)

  ############################################################
  # __is_port_bound
  # Check if the requested port is available. If it
  # isn't available, then a previous test probably didn't
  # shutdown properly.
  ############################################################
  def __is_port_bound(self, port):
    port = int(port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      # Try to bind to all IP addresses, this port
      s.bind(("", port))
      # If we get here, we were able to bind successfully,
      # which means the port is free.
    except socket.error:
      # If we get an exception, it might be because the port is still bound
      # which would be bad, or maybe it is a privileged port (<1024) and we
      # are not running as root, or maybe the server is gone, but sockets are
      # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
      # connect.
      try:
        s.connect(("127.0.0.1", port))
        # If we get here, we were able to connect to something, which means
        # that the port is still bound.
        return True
      except socket.error:
        # An exception means that we couldn't connect, so a server probably
        # isn't still running on the port.
        pass
    finally:
      s.close()
    return False
  ############################################################
  # End __is_port_bound
  ############################################################
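
  # For example (hypothetical port): with nothing listening on 8080,
  # bind() succeeds and __is_port_bound(8080) returns False; while a
  # framework still holds 8080, bind() fails but connect() succeeds,
  # so it returns True.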

  ############################################################
  # __parse_results
  # Aggregates the parsed results: counts commits and lines of
  # code for each framework, then writes the accumulated
  # results to the aggregate results.json file.
  ############################################################
  def __parse_results(self, tests):
    # Run the method to get the commit count of each framework.
    self.__count_commits()
    # Call the method which counts the sloc for each framework
    self.__count_sloc()

    # Time to create parsed files
    # Aggregate JSON file
    with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
      f.write(json.dumps(self.results, indent=2))
  ############################################################
  # End __parse_results
  ############################################################

  #############################################################
  # __count_sloc
  #############################################################
  def __count_sloc(self):
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    jsonResult = {}
    for framework, testlist in frameworks.iteritems():
      if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
        logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
        continue

      # Unfortunately the source_code files use lines like
      # ./cpoll_cppsp/www/fortune_old instead of
      # ./www/fortune_old
      # so we have to back our working dir up one level
      wd = os.path.dirname(testlist[0].directory)

      try:
        command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

        if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
          command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
          logging.info("Using custom cloc definitions for %s", framework)

        # Find the last instance of the word 'code' in the yaml output. This should
        # be the line count for the sum of all listed files or just the line count
        # for the last file in the case where there's only one file listed.
        command = command + "| grep code | tail -1 | cut -d: -f 2"
        logging.debug("Running \"%s\" (cwd=%s)", command, wd)
        lineCount = subprocess.check_output(command, cwd=wd, shell=True)
        jsonResult[framework] = int(lineCount)
      except subprocess.CalledProcessError:
        continue
      except ValueError as ve:
        logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
    self.results['rawData']['slocCounts'] = jsonResult
  ############################################################
  # End __count_sloc
  ############################################################
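
  # The assembled pipeline looks like this (framework directory illustrative):
  #   cloc --list-file=frameworks/Java/servlet/source_code --yaml \
  #     | grep code | tail -1 | cut -d: -f 2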

  ############################################################
  # __count_commits
  #
  ############################################################
  def __count_commits(self):
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    def count_commit(framework, directory, jsonResult):
      command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
      try:
        commitCount = subprocess.check_output(command, shell=True)
        jsonResult[framework] = int(commitCount)
      except subprocess.CalledProcessError:
        pass

    # Because git can be slow when run in large batches, this
    # calls git up to 4 times in parallel. Normal improvement is ~3-4x
    # in my trials, or ~100 seconds down to ~25
    # This is safe to parallelize as long as each thread only
    # accesses one key in the dictionary
    threads = []
    jsonResult = {}
    t1 = datetime.now()
    for framework, testlist in frameworks.iteritems():
      directory = testlist[0].directory
      # Pass framework explicitly so each thread writes under its own key,
      # rather than closing over the shared loop variable
      t = threading.Thread(target=count_commit, args=(framework, directory, jsonResult))
      t.start()
      threads.append(t)
      # Git has internal locks, full parallel will just cause contention
      # and slowness, so we rate-limit a bit
      if len(threads) >= 4:
        threads[0].join()
        threads.remove(threads[0])

    # Wait for remaining threads
    for t in threads:
      t.join()
    t2 = datetime.now()
    # print "Took %s seconds " % (t2 - t1).seconds

    self.results['rawData']['commitCounts'] = jsonResult
    self.commits = jsonResult
  ############################################################
  # End __count_commits
  ############################################################
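
  # Each worker thread effectively runs (directory illustrative):
  #   git rev-list HEAD -- frameworks/Java/servlet | sort -u | wc -l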

  ############################################################
  # __write_intermediate_results
  ############################################################
  def __write_intermediate_results(self, test_name, status_message):
    try:
      self.results["completed"][test_name] = status_message
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
        f.write(json.dumps(self.results, indent=2))
    except (IOError):
      logging.error("Error writing results.json")
  ############################################################
  # End __write_intermediate_results
  ############################################################

  def __load_results(self):
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
        self.results = json.load(f)
    except (ValueError, IOError):
      pass

  ############################################################
  # __finish
  ############################################################
  def __finish(self):
    if not self.list_tests and not self.list_test_metadata and not self.parse:
      tests = self.__gather_tests
      # Normally you don't have to repeat the color code before each line, but
      # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
      # or stream flush, so we have to ensure that the color code is printed repeatedly
      prefix = Fore.CYAN
      for line in header("Verification Summary", top='=', bottom='').split('\n'):
        print prefix + line
      for test in tests:
        print prefix + "| Test: %s" % test.name
        if test.name in self.results['verify'].keys():
          for test_type, result in self.results['verify'][test.name].iteritems():
            if result.upper() == "PASS":
              color = Fore.GREEN
            elif result.upper() == "WARN":
              color = Fore.YELLOW
            else:
              color = Fore.RED
            print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
        else:
          print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
      print prefix + header('', top='', bottom='=') + Style.RESET_ALL

    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  ############################################################
  # End __finish
  ############################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################

  ############################################################
  # Initialize the benchmarker. The args are the arguments
  # parsed via argparse.
  ############################################################
  def __init__(self, args):

    # Map type strings to their objects
    types = dict()
    types['json'] = JsonTestType()
    types['db'] = DBTestType()
    types['query'] = QueryTestType()
    types['fortune'] = FortuneTestType()
    types['update'] = UpdateTestType()
    types['plaintext'] = PlaintextTestType()

    # Turn type into a map instead of a string
    if args['type'] == 'all':
      args['types'] = types
    else:
      args['types'] = { args['type'] : types[args['type']] }
    del args['type']

    args['max_threads'] = args['threads']
    args['max_concurrency'] = max(args['concurrency_levels'])

    self.__dict__.update(args)
    # pprint(self.__dict__)

    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    # setup some additional variables
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()

    if hasattr(self, 'parse') and self.parse != None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        # Load json file into results object
        self.results = json.load(f)
    except IOError:
      logging.warn("results.json for test %s not found.", self.name)

    if self.results == None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_levels
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['verify'] = dict()
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()

  ############################################################
  # End __init__
  ############################################################
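
# A minimal usage sketch (hypothetical values; in practice the args dict comes
# from the project's argparse-based entry point, which is not shown here).
# Every key below corresponds to an attribute this class reads:
#
#   args = {
#     'type': 'all', 'mode': 'verify', 'os': 'linux', 'database_os': 'linux',
#     'name': 'example-run', 'test': None, 'exclude': None, 'install': None,
#     'install_strategy': 'unified', 'threads': 8,
#     'concurrency_levels': [8, 16, 32], 'query_levels': [1, 5],
#     'duration': 15, 'sleep': 60, 'parse': None, 'list_tests': False,
#     'list_test_metadata': False, 'runner_user': 'testrunner',
#     'client_user': 'tfb', 'client_host': '10.0.0.2',
#     'client_identity_file': None, 'database_user': None,
#     'database_host': None, 'database_identity_file': None,
#     'server_host': '10.0.0.1',
#   }
#   benchmarker = Benchmarker(args)
#   benchmarker.run()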