benchmarker.py 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054
  1. from setup.linux.installer import Installer
  2. from setup.linux import setup_util
  3. from benchmark import framework_test
  4. from benchmark.test_types import *
  5. from utils import header
  6. from utils import gather_tests
  7. from utils import gather_frameworks
  8. from utils import verify_database_connections
  9. import os
  10. import shutil
  11. import stat
  12. import json
  13. import subprocess
  14. import traceback
  15. import time
  16. import pprint
  17. import csv
  18. import sys
  19. import logging
  20. import socket
  21. import threading
  22. import textwrap
  23. from pprint import pprint
  24. from multiprocessing import Process
  25. from datetime import datetime
  26. # Cross-platform colored text
  27. from colorama import Fore, Back, Style
  28. # Text-based progress indicators
  29. import progressbar
  30. class Benchmarker:
  31. ##########################################################################################
  32. # Public methods
  33. ##########################################################################################
  34. ############################################################
  35. # Prints all the available tests
  36. ############################################################
  37. def run_list_tests(self):
  38. all_tests = self.__gather_tests
  39. for test in all_tests:
  40. print test.name
  41. self.__finish()
  42. ############################################################
  43. # End run_list_tests
  44. ############################################################
  45. ############################################################
  46. # Prints the metadata for all the available tests
  47. ############################################################
  48. def run_list_test_metadata(self):
  49. all_tests = self.__gather_tests
  50. all_tests_json = json.dumps(map(lambda test: {
  51. "name": test.name,
  52. "approach": test.approach,
  53. "classification": test.classification,
  54. "database": test.database,
  55. "framework": test.framework,
  56. "language": test.language,
  57. "orm": test.orm,
  58. "platform": test.platform,
  59. "webserver": test.webserver,
  60. "os": test.os,
  61. "database_os": test.database_os,
  62. "display_name": test.display_name,
  63. "notes": test.notes,
  64. "versus": test.versus
  65. }, all_tests))
  66. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  67. f.write(all_tests_json)
  68. self.__finish()
  69. ############################################################
  70. # End run_list_test_metadata
  71. ############################################################
  72. ############################################################
  73. # parse_timestamp
  74. # Re-parses the raw data for a given timestamp
  75. ############################################################
  76. def parse_timestamp(self):
  77. all_tests = self.__gather_tests
  78. for test in all_tests:
  79. test.parse_all()
  80. self.__parse_results(all_tests)
  81. self.__finish()
  82. ############################################################
  83. # End parse_timestamp
  84. ############################################################
  85. ############################################################
  86. # Run the tests:
  87. # This process involves setting up the client/server machines
  88. # with any necessary change. Then going through each test,
  89. # running their setup script, verifying the URLs, and
  90. # running benchmarks against them.
  91. ############################################################
  92. def run(self):
  93. ##########################
  94. # Get a list of all known
  95. # tests that we can run.
  96. ##########################
  97. all_tests = self.__gather_tests
  98. ##########################
  99. # Setup client/server
  100. ##########################
  101. print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
  102. self.__setup_server()
  103. self.__setup_database()
  104. self.__setup_client()
  105. ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
  106. #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
  107. # raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
  108. ##########################
  109. # Run tests
  110. ##########################
  111. print header("Running Tests...", top='=', bottom='=')
  112. result = self.__run_tests(all_tests)
  113. ##########################
  114. # Parse results
  115. ##########################
  116. if self.mode == "benchmark":
  117. print header("Parsing Results ...", top='=', bottom='=')
  118. self.__parse_results(all_tests)
  119. self.__finish()
  120. return result
  121. ############################################################
  122. # End run
  123. ############################################################
  124. ############################################################
  125. # database_sftp_string(batch_file)
  126. # generates a fully qualified URL for sftp to database
  127. ############################################################
  128. def database_sftp_string(self, batch_file):
  129. sftp_string = "sftp -oStrictHostKeyChecking=no "
  130. if batch_file != None: sftp_string += " -b " + batch_file + " "
  131. if self.database_identity_file != None:
  132. sftp_string += " -i " + self.database_identity_file + " "
  133. return sftp_string + self.database_user + "@" + self.database_host
  134. ############################################################
  135. # End database_sftp_string
  136. ############################################################
  137. ############################################################
  138. # client_sftp_string(batch_file)
  139. # generates a fully qualified URL for sftp to client
  140. ############################################################
  141. def client_sftp_string(self, batch_file):
  142. sftp_string = "sftp -oStrictHostKeyChecking=no "
  143. if batch_file != None: sftp_string += " -b " + batch_file + " "
  144. if self.client_identity_file != None:
  145. sftp_string += " -i " + self.client_identity_file + " "
  146. return sftp_string + self.client_user + "@" + self.client_host
  147. ############################################################
  148. # End client_sftp_string
  149. ############################################################
  150. ############################################################
  151. # generate_url(url, port)
  152. # generates a fully qualified URL for accessing a test url
  153. ############################################################
  154. def generate_url(self, url, port):
  155. return self.server_host + ":" + str(port) + url
  156. ############################################################
  157. # End generate_url
  158. ############################################################
  159. ############################################################
  160. # get_output_file(test_name, test_type)
  161. # returns the output file name for this test_name and
  162. # test_type timestamp/test_type/test_name/raw
  163. ############################################################
  164. def get_output_file(self, test_name, test_type):
  165. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  166. ############################################################
  167. # End get_output_file
  168. ############################################################
  169. ############################################################
  170. # output_file(test_name, test_type)
  171. # returns the output file for this test_name and test_type
  172. # timestamp/test_type/test_name/raw
  173. ############################################################
  174. def output_file(self, test_name, test_type):
  175. path = self.get_output_file(test_name, test_type)
  176. try:
  177. os.makedirs(os.path.dirname(path))
  178. except OSError:
  179. pass
  180. return path
  181. ############################################################
  182. # End output_file
  183. ############################################################
  184. ############################################################
  185. # get_stats_file(test_name, test_type)
  186. # returns the stats file name for this test_name and
  187. # test_type timestamp/test_type/test_name/raw
  188. ############################################################
  189. def get_stats_file(self, test_name, test_type):
  190. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  191. ############################################################
  192. # End get_stats_file
  193. ############################################################
  194. ############################################################
  195. # stats_file(test_name, test_type)
  196. # returns the stats file for this test_name and test_type
  197. # timestamp/test_type/test_name/raw
  198. ############################################################
  199. def stats_file(self, test_name, test_type):
  200. path = self.get_stats_file(test_name, test_type)
  201. try:
  202. os.makedirs(os.path.dirname(path))
  203. except OSError:
  204. pass
  205. return path
  206. ############################################################
  207. # End stats_file
  208. ############################################################
  209. ############################################################
  210. # full_results_directory
  211. ############################################################
  212. def full_results_directory(self):
  213. path = os.path.join(self.result_directory, self.timestamp)
  214. try:
  215. os.makedirs(path)
  216. except OSError:
  217. pass
  218. return path
  219. ############################################################
  220. # End full_results_directory
  221. ############################################################
  222. ############################################################
  223. # Latest intermediate results dirctory
  224. ############################################################
  225. def latest_results_directory(self):
  226. path = os.path.join(self.result_directory,"latest")
  227. try:
  228. os.makedirs(path)
  229. except OSError:
  230. pass
  231. # Give testrunner permission to write into results directory
  232. # so LOGDIR param always works in setup.sh
  233. # While 775 is more preferrable, we would have to ensure that
  234. # testrunner is in the group of the current user
  235. if not self.os.lower() == 'windows':
  236. mode777 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
  237. stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
  238. stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
  239. os.chmod(path, mode777)
  240. return path
  241. ############################################################
  242. # report_verify_results
  243. # Used by FrameworkTest to add verification details to our results
  244. #
  245. # TODO: Technically this is an IPC violation - we are accessing
  246. # the parent process' memory from the child process
  247. ############################################################
  248. def report_verify_results(self, framework, test, result):
  249. if framework.name not in self.results['verify'].keys():
  250. self.results['verify'][framework.name] = dict()
  251. self.results['verify'][framework.name][test] = result
  252. ############################################################
  253. # report_benchmark_results
  254. # Used by FrameworkTest to add benchmark data to this
  255. #
  256. # TODO: Technically this is an IPC violation - we are accessing
  257. # the parent process' memory from the child process
  258. ############################################################
  259. def report_benchmark_results(self, framework, test, results):
  260. if test not in self.results['rawData'].keys():
  261. self.results['rawData'][test] = dict()
  262. # If results has a size from the parse, then it succeeded.
  263. if results:
  264. self.results['rawData'][test][framework.name] = results
  265. # This may already be set for single-tests
  266. if framework.name not in self.results['succeeded'][test]:
  267. self.results['succeeded'][test].append(framework.name)
  268. else:
  269. # This may already be set for single-tests
  270. if framework.name not in self.results['failed'][test]:
  271. self.results['failed'][test].append(framework.name)
  272. ############################################################
  273. # End report_results
  274. ############################################################
  275. ##########################################################################################
  276. # Private methods
  277. ##########################################################################################
  278. ############################################################
  279. # Gathers all the tests
  280. ############################################################
  281. @property
  282. def __gather_tests(self):
  283. tests = gather_tests(include=self.test,
  284. exclude=self.exclude,
  285. benchmarker=self)
  286. # If the tests have been interrupted somehow, then we want to resume them where we left
  287. # off, rather than starting from the beginning
  288. if os.path.isfile('current_benchmark.txt'):
  289. with open('current_benchmark.txt', 'r') as interrupted_benchmark:
  290. interrupt_bench = interrupted_benchmark.read().strip()
  291. for index, atest in enumerate(tests):
  292. if atest.name == interrupt_bench:
  293. tests = tests[index:]
  294. break
  295. return tests
  296. ############################################################
  297. # End __gather_tests
  298. ############################################################
  299. ############################################################
  300. # Makes any necessary changes to the server that should be
  301. # made before running the tests. This involves setting kernal
  302. # settings to allow for more connections, or more file
  303. # descriptiors
  304. #
  305. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  306. ############################################################
  307. def __setup_server(self):
  308. try:
  309. if os.name == 'nt':
  310. return True
  311. #subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  312. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  313. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  314. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  315. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  316. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  317. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  318. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  319. except subprocess.CalledProcessError:
  320. return False
  321. ############################################################
  322. # End __setup_server
  323. ############################################################
  324. ############################################################
  325. # Clean up any processes that run with root privileges
  326. ############################################################
  327. def __cleanup_leftover_processes_before_test(self):
  328. p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
  329. p.communicate("""
  330. sudo /etc/init.d/apache2 stop
  331. """)
  332. ############################################################
  333. # Makes any necessary changes to the database machine that
  334. # should be made before running the tests. Is very similar
  335. # to the server setup, but may also include database specific
  336. # changes.
  337. ############################################################
  338. def __setup_database(self):
  339. p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
  340. p.communicate("""
  341. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  342. sudo sysctl -w net.core.somaxconn=65535
  343. sudo sysctl -w kernel.sched_autogroup_enabled=0
  344. sudo -s ulimit -n 65535
  345. sudo sysctl net.ipv4.tcp_tw_reuse=1
  346. sudo sysctl net.ipv4.tcp_tw_recycle=1
  347. sudo sysctl -w kernel.shmmax=2147483648
  348. sudo sysctl -w kernel.shmall=2097152
  349. sudo sysctl -w kernel.sem="250 32000 256 512"
  350. """)
  351. # TODO - print kernel configuration to file
  352. # echo "Printing kernel configuration:" && sudo sysctl -a
  353. # Explanations:
  354. # net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
  355. # ulimit -n: http://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/
  356. # net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
  357. # kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
  358. # For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
  359. ############################################################
  360. # End __setup_database
  361. ############################################################
  362. ############################################################
  363. # Makes any necessary changes to the client machine that
  364. # should be made before running the tests. Is very similar
  365. # to the server setup, but may also include client specific
  366. # changes.
  367. ############################################################
  368. def __setup_client(self):
  369. p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
  370. p.communicate("""
  371. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  372. sudo sysctl -w net.core.somaxconn=65535
  373. sudo -s ulimit -n 65535
  374. sudo sysctl net.ipv4.tcp_tw_reuse=1
  375. sudo sysctl net.ipv4.tcp_tw_recycle=1
  376. sudo sysctl -w kernel.shmmax=2147483648
  377. sudo sysctl -w kernel.shmall=2097152
  378. """)
  379. ############################################################
  380. # End __setup_client
  381. ############################################################
  382. ############################################################
  383. # __run_tests
  384. #
  385. # 2013-10-02 ASB Calls each test passed in tests to
  386. # __run_test in a separate process. Each
  387. # test is given a set amount of time and if
  388. # kills the child process (and subsequently
  389. # all of its child processes). Uses
  390. # multiprocessing module.
  391. ############################################################
    ############################################################
    # __run_tests
    #
    # 2013-10-02 ASB  Calls each test passed in tests to
    #                 __run_test in a separate process.  Each
    #                 test is given a set amount of time and if
    #                 exceeded, kills the child process (and
    #                 subsequently all of its child processes).
    #                 Uses the multiprocessing module.
    ############################################################
    def __run_tests(self, tests):
        """Run every test in `tests`; return 1 if any test failed, else 0.

        On Windows the tests run in-process; elsewhere each test runs in a
        child Process bounded by self.run_test_timeout_seconds.
        """
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s",__name__)

        error_happened = False
        if self.os.lower() == 'windows':
            # multiprocessing/progressbar features below do not work on
            # Windows, so run each test directly in this process.
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                # Record the in-flight test so an interrupted run can be
                # resumed later (see __gather_tests).
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                if self.__run_test(test) != 0:
                    error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")

            # Setup a nice progressbar and ETA indicator
            widgets = [self.mode, ': ',  progressbar.Percentage(),
                       ' ', progressbar.Bar(),
                       ' Rough ', progressbar.ETA()]
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)

                # Record the in-flight test for resume-after-interrupt.
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)

                # Run the test in a child process so a hung test can be
                # killed without taking down the whole benchmark run.
                test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                test_process.start()
                test_process.join(self.run_test_timeout_seconds)
                self.__load_results()  # Load intermediate result from child process
                if(test_process.is_alive()):
                    # join() returned because of the timeout, not because
                    # the child finished - kill it and record the timeout.
                    logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                    self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                    test_process.terminate()
                    test_process.join()
                if test_process.exitcode != 0:
                    error_happened = True
            pbar.finish()

        # The whole run completed; the resume marker is no longer needed.
        if os.path.isfile('current_benchmark.txt'):
            os.remove('current_benchmark.txt')
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
    ############################################################
    # End __run_tests
    ############################################################
  442. ############################################################
  443. # __run_test
  444. # 2013-10-02 ASB Previously __run_tests. This code now only
  445. # processes a single test.
  446. #
  447. # Ensures that the system has all necessary software to run
  448. # the tests. This does not include that software for the individual
  449. # test, but covers software such as curl and weighttp that
  450. # are needed.
  451. ############################################################
    ############################################################
    # __run_test
    # 2013-10-02 ASB  Previously __run_tests.  This code now only
    #                 processes a single test.
    #
    # Runs a single test: start the framework, verify its URLs,
    # optionally benchmark it, stop it, and save intermediate
    # results.  Logs everything to logs/<test name>/out.txt.
    ############################################################
    def __run_test(self, test):
        """Run one framework test end-to-end.

        Returns 0/1 on Windows; on other platforms this runs in a child
        process (see __run_tests) and exits with that code instead.
        """

        # Used to capture return values
        def exit_with_code(code):
            # On Windows we run in-process and must return; on Linux we
            # are a child process and must exit so the parent sees the code.
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        # NOTE(review): latest_results_directory is referenced here without
        # parentheses, although it is defined above as a method - presumably
        # __init__ rebinds the attribute to the directory path string;
        # confirm against the constructor (not visible in this chunk).
        logDir = os.path.join(self.latest_results_directory, 'logs', test.name.lower())
        try:
            os.makedirs(logDir)
        except Exception:
            # Directory may already exist from a previous run.
            pass
        with open(os.path.join(logDir, 'out.txt'), 'w') as out:

            # Skip tests whose declared OS does not match this environment.
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
                return exit_with_code(0)

            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)

            out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))

            # Refuse to overwrite data from an earlier run of this test.
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
                return exit_with_code(1)
            out.flush()

            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()

            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                # Restart every database service so each test starts from a
                # clean slate, then confirm they all accept connections.
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=out, shell=True)
                    p.communicate("""
                    sudo restart mysql
                    sudo restart mongod
                    sudo service redis-server restart
                    sudo service postgresql restart
                    sudo service cassandra restart
                    /opt/elasticsearch/elasticsearch restart
                    """)
                    time.sleep(10)

                    st = verify_database_connections([
                        ("mysql", self.database_host, 3306),
                        ("mongodb", self.database_host, 27017),
                        ("redis", self.database_host, 6379),
                        ("postgresql", self.database_host, 5432),
                        ("cassandra", self.database_host, 9160),
                        ("elasticsearch", self.database_host, 9200)
                    ])
                    print "database connection test results:\n" + "\n".join(st[1])

                self.__cleanup_leftover_processes_before_test();

                # The test's port must be free before we can start it.
                if self.__is_port_bound(test.port):
                    # This can happen sometimes - let's try again
                    self.__stop_test(out)
                    out.flush()
                    time.sleep(15)
                    if self.__is_port_bound(test.port):
                        # We gave it our all
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                        out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                        out.flush()
                        print "Error: Unable to recover port, cannot start test"
                        return exit_with_code(1)

                result = test.start(out)
                if result != 0:
                    self.__stop_test(out)
                    time.sleep(5)
                    out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                    out.flush()
                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                    return exit_with_code(1)

                logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
                time.sleep(self.sleep)

                ##########################
                # Verify URLs
                ##########################
                logging.info("Verifying framework URLs")
                verificationPath = os.path.join(logDir,"verification")
                try:
                    os.makedirs(verificationPath)
                except OSError:
                    pass
                passed_verify = test.verify_urls(verificationPath)

                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    logging.info("Benchmarking")
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    benchmarkPath = os.path.join(logDir,"benchmark")
                    try:
                        os.makedirs(benchmarkPath)
                    except OSError:
                        pass
                    test.benchmark(benchmarkPath)

                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                self.__stop_test(out)
                out.flush()
                time.sleep(15)
                # The port must be released, or the next test cannot bind it.
                if self.__is_port_bound(test.port):
                    # This can happen sometimes - let's try again
                    self.__stop_test(out)
                    out.flush()
                    time.sleep(15)
                    if self.__is_port_bound(test.port):
                        # We gave it our all
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                        out.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                        out.flush()
                        return exit_with_code(1)

                out.write(header("Stopped %s" % test.name))
                out.flush()
                time.sleep(5)

                ##########################################################
                # Save results thus far into the latest results directory
                ##########################################################
                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))

                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                out.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=out)
                out.flush()
                try:
                    # Best-effort cleanup; a failing stop() is recorded too.
                    self.__stop_test(out)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                    out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=out)
                    out.flush()
                out.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                self.__stop_test(out)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)

            out.close()
            return exit_with_code(0)
    ############################################################
    # End __run_test
    ############################################################
  613. ############################################################
  614. # __stop_test(benchmarker)
  615. # Stops all running tests
  616. ############################################################
  617. def __stop_test(self, out):
  618. try:
  619. subprocess.check_call('sudo killall -s 9 -u %s' % self.runner_user, shell=True, stderr=out, stdout=out)
  620. retcode = 0
  621. except Exception:
  622. retcode = 1
  623. return retcode
  624. ############################################################
  625. # End __stop_test
  626. ############################################################
def is_port_bound(self, port):
    """Public wrapper around the private __is_port_bound check.

    Returns True if `port` appears to be held by a running server.
    """
    return self.__is_port_bound(port)
  629. ############################################################
  630. # __is_port_bound
  631. # Check if the requested port is available. If it
  632. # isn't available, then a previous test probably didn't
  633. # shutdown properly.
  634. ############################################################
  635. def __is_port_bound(self, port):
  636. port = int(port)
  637. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  638. try:
  639. # Try to bind to all IP addresses, this port
  640. s.bind(("", port))
  641. # If we get here, we were able to bind successfully,
  642. # which means the port is free.
  643. except socket.error:
  644. # If we get an exception, it might be because the port is still bound
  645. # which would be bad, or maybe it is a privileged port (<1024) and we
  646. # are not running as root, or maybe the server is gone, but sockets are
  647. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  648. # connect.
  649. try:
  650. s.connect(("127.0.0.1", port))
  651. # If we get here, we were able to connect to something, which means
  652. # that the port is still bound.
  653. return True
  654. except socket.error:
  655. # An exception means that we couldn't connect, so a server probably
  656. # isn't still running on the port.
  657. pass
  658. finally:
  659. s.close()
  660. return False
  661. ############################################################
  662. # End __is_port_bound
  663. ############################################################
  664. ############################################################
  665. # __parse_results
# Parses the raw benchmark output into the aggregated results:
# gathers per-framework commit counts and source line counts,
# then serializes self.results to results.json in both the full
# and latest results directories.
  670. ############################################################
  671. def __parse_results(self, tests):
  672. # Run the method to get the commmit count of each framework.
  673. self.__count_commits()
  674. # Call the method which counts the sloc for each framework
  675. self.__count_sloc()
  676. # Time to create parsed files
  677. # Aggregate JSON file
  678. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  679. f.write(json.dumps(self.results, indent=2))
  680. with open(os.path.join(self.latest_results_directory, "results.json"), "w") as latest:
  681. latest.write(json.dumps(self.results, indent=2))
  682. ############################################################
  683. # End __parse_results
  684. ############################################################
  685. #############################################################
  686. # __count_sloc
  687. #############################################################
  688. def __count_sloc(self):
  689. frameworks = gather_frameworks(include=self.test,
  690. exclude=self.exclude, benchmarker=self)
  691. jsonResult = {}
  692. for framework, testlist in frameworks.iteritems():
  693. if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
  694. logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
  695. continue
  696. # Unfortunately the source_code files use lines like
  697. # ./cpoll_cppsp/www/fortune_old instead of
  698. # ./www/fortune_old
  699. # so we have to back our working dir up one level
  700. wd = os.path.dirname(testlist[0].directory)
  701. try:
  702. command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
  703. if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
  704. command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
  705. logging.info("Using custom cloc definitions for %s", framework)
  706. # Find the last instance of the word 'code' in the yaml output. This should
  707. # be the line count for the sum of all listed files or just the line count
  708. # for the last file in the case where there's only one file listed.
  709. command = command + "| grep code | tail -1 | cut -d: -f 2"
  710. logging.debug("Running \"%s\" (cwd=%s)", command, wd)
  711. lineCount = subprocess.check_output(command, cwd=wd, shell=True)
  712. jsonResult[framework] = int(lineCount)
  713. except subprocess.CalledProcessError:
  714. continue
  715. except ValueError as ve:
  716. logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
  717. self.results['rawData']['slocCounts'] = jsonResult
  718. ############################################################
  719. # End __count_sloc
  720. ############################################################
  721. ############################################################
  722. # __count_commits
  723. #
  724. ############################################################
  725. def __count_commits(self):
  726. frameworks = gather_frameworks(include=self.test,
  727. exclude=self.exclude, benchmarker=self)
  728. def count_commit(directory, jsonResult):
  729. command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
  730. try:
  731. commitCount = subprocess.check_output(command, shell=True)
  732. jsonResult[framework] = int(commitCount)
  733. except subprocess.CalledProcessError:
  734. pass
  735. # Because git can be slow when run in large batches, this
  736. # calls git up to 4 times in parallel. Normal improvement is ~3-4x
  737. # in my trials, or ~100 seconds down to ~25
  738. # This is safe to parallelize as long as each thread only
  739. # accesses one key in the dictionary
  740. threads = []
  741. jsonResult = {}
  742. t1 = datetime.now()
  743. for framework, testlist in frameworks.iteritems():
  744. directory = testlist[0].directory
  745. t = threading.Thread(target=count_commit, args=(directory,jsonResult))
  746. t.start()
  747. threads.append(t)
  748. # Git has internal locks, full parallel will just cause contention
  749. # and slowness, so we rate-limit a bit
  750. if len(threads) >= 4:
  751. threads[0].join()
  752. threads.remove(threads[0])
  753. # Wait for remaining threads
  754. for t in threads:
  755. t.join()
  756. t2 = datetime.now()
  757. # print "Took %s seconds " % (t2 - t1).seconds
  758. self.results['rawData']['commitCounts'] = jsonResult
  759. self.commits = jsonResult
  760. ############################################################
  761. # End __count_commits
  762. ############################################################
  763. ############################################################
  764. # __write_intermediate_results
  765. ############################################################
  766. def __write_intermediate_results(self,test_name,status_message):
  767. try:
  768. self.results["completed"][test_name] = status_message
  769. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  770. f.write(json.dumps(self.results, indent=2))
  771. except (IOError):
  772. logging.error("Error writing results.json")
  773. ############################################################
  774. # End __write_intermediate_results
  775. ############################################################
  776. def __load_results(self):
  777. try:
  778. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  779. self.results = json.load(f)
  780. except (ValueError, IOError):
  781. pass
  782. ############################################################
  783. # __finish
  784. ############################################################
  785. def __finish(self):
  786. if not self.list_tests and not self.list_test_metadata and not self.parse:
  787. tests = self.__gather_tests
  788. # Normally you don't have to use Fore.BLUE before each line, but
  789. # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
  790. # or stream flush, so we have to ensure that the color code is printed repeatedly
  791. prefix = Fore.CYAN
  792. for line in header("Verification Summary", top='=', bottom='').split('\n'):
  793. print prefix + line
  794. for test in tests:
  795. print prefix + "| Test: %s" % test.name
  796. if test.name in self.results['verify'].keys():
  797. for test_type, result in self.results['verify'][test.name].iteritems():
  798. if result.upper() == "PASS":
  799. color = Fore.GREEN
  800. elif result.upper() == "WARN":
  801. color = Fore.YELLOW
  802. else:
  803. color = Fore.RED
  804. print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
  805. else:
  806. print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
  807. print prefix + header('', top='', bottom='=') + Style.RESET_ALL
  808. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  809. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  810. ############################################################
  811. # End __finish
  812. ############################################################
  813. ##########################################################################################
  814. # Constructor
  815. ##########################################################################################
  816. ############################################################
  817. # Initialize the benchmarker. The args are the arguments
  818. # parsed via argparser.
  819. ############################################################
def __init__(self, args):
    """Initialize the benchmarker from the dict of argparse-parsed args.

    Every entry of `args` becomes an instance attribute via
    self.__dict__.update(args); later code relies on those attributes
    (test, exclude, duration, install, ...).
    """
    # Map type strings to their objects
    types = dict()
    types['json'] = JsonTestType()
    types['db'] = DBTestType()
    types['query'] = QueryTestType()
    types['fortune'] = FortuneTestType()
    types['update'] = UpdateTestType()
    types['plaintext'] = PlaintextTestType()
    # Turn type into a map instead of a string
    if args['type'] == 'all':
        args['types'] = types
    else:
        args['types'] = { args['type'] : types[args['type']] }
    del args['type']
    args['max_threads'] = args['threads']
    args['max_concurrency'] = max(args['concurrency_levels'])
    # Promote every parsed argument to an instance attribute.
    self.__dict__.update(args)
    # pprint(self.__dict__)
    self.start_time = time.time()
    # Hard cap on a single test's run time: 7200 s (2 hours).
    self.run_test_timeout_seconds = 7200
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    # setup some additional variables
    # Database credentials fall back to the client's when not given.
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
    # Remember root directory
    self.fwroot = setup_util.get_fwroot()
    # setup results and latest_results directories
    self.result_directory = os.path.join("results")
    if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
        shutil.rmtree(os.path.join(self.fwroot, "results"))
    # NOTE: this shadows the latest_results_directory() method with its
    # return value; all later code reads it as a plain string attribute.
    self.latest_results_directory = self.latest_results_directory()
    # remove installs directories if --clean-all provided
    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if args['clean_all']:
        os.system("sudo rm -rf " + self.install_root)
        os.mkdir(self.install_root)
    # When parsing an existing run, reuse its timestamp so directory
    # names line up; otherwise stamp this run with the current time.
    if hasattr(self, 'parse') and self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            #Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test not found.")
    if self.results == None:
        # No prior results: build a fresh skeleton covering every
        # test type for rawData/succeeded/failed buckets.
        self.results = dict()
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_levels
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['verify'] = dict()
    else:
        #for x in self.__gather_tests():
        #  if x.name not in self.results['frameworks']:
        #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
    # Optionally install required software before running anything.
    if self.install is not None:
        install = Installer(self, self.install_strategy)
        install.install_software()
  915. ############################################################
  916. # End __init__
  917. ############################################################