from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks

import os
import json
import subprocess
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
import textwrap
from pprint import pprint
from multiprocessing import Process
from datetime import datetime

# Cross-platform colored text
from colorama import Fore, Back, Style

# Text-based progress indicators
import progressbar

class Benchmarker:

    ##########################################################################################
    # Public methods
    ##########################################################################################

    ############################################################
    # Prints all the available tests
    ############################################################
    def run_list_tests(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            print test.name

        self.__finish()
    ############################################################
    # End run_list_tests
    ############################################################

    ############################################################
    # Prints the metadata for all the available tests
    ############################################################
    def run_list_test_metadata(self):
        all_tests = self.__gather_tests
        all_tests_json = json.dumps(map(lambda test: {
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        }, all_tests))

        with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
            f.write(all_tests_json)

        self.__finish()
    ############################################################
    # End run_list_test_metadata
    ############################################################
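
    # For illustration only: a single entry in test_metadata.json would look
    # roughly like this (field values here are hypothetical, not taken from a
    # real test):
    #   {
    #     "name": "gemini",
    #     "approach": "Realistic",
    #     "classification": "Fullstack",
    #     "database": "MySQL",
    #     "framework": "gemini",
    #     "language": "Java",
    #     "orm": "Micro",
    #     "platform": "Servlet",
    #     "webserver": "Resin",
    #     "os": "Linux",
    #     "database_os": "Linux",
    #     "display_name": "gemini",
    #     "notes": "",
    #     "versus": "servlet"
    #   }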

    ############################################################
    # parse_timestamp
    # Re-parses the raw data for a given timestamp
    ############################################################
    def parse_timestamp(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            test.parse_all()

        self.__parse_results(all_tests)

        self.__finish()
    ############################################################
    # End parse_timestamp
    ############################################################

    ############################################################
    # Run the tests:
    # This process involves setting up the client/server machines
    # with any necessary changes, then going through each test,
    # running its setup script, verifying the URLs, and
    # running benchmarks against them.
    ############################################################
    def run(self):
        ##########################
        # Get a list of all known
        # tests that we can run.
        ##########################
        all_tests = self.__gather_tests

        ##########################
        # Setup client/server
        ##########################
        print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
        self.__setup_server()
        self.__setup_database()
        self.__setup_client()

        ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
        #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
        #    raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

        ##########################
        # Run tests
        ##########################
        print header("Running Tests...", top='=', bottom='=')
        result = self.__run_tests(all_tests)

        ##########################
        # Parse results
        ##########################
        if self.mode == "benchmark":
            print header("Parsing Results ...", top='=', bottom='=')
            self.__parse_results(all_tests)

        self.__finish()
        return result
    ############################################################
    # End run
    ############################################################

    ############################################################
    # database_sftp_string(batch_file)
    # generates a fully qualified sftp command string for the
    # database machine
    ############################################################
    def database_sftp_string(self, batch_file):
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file != None:
            sftp_string += " -b " + batch_file + " "

        if self.database_identity_file != None:
            sftp_string += " -i " + self.database_identity_file + " "

        return sftp_string + self.database_user + "@" + self.database_host
    ############################################################
    # End database_sftp_string
    ############################################################
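
    # Illustrative example (hypothetical user and host, not from this repo):
    # with database_user="bench" and database_host="10.0.0.2",
    #   database_sftp_string("batch.txt")
    # returns:
    #   "sftp -oStrictHostKeyChecking=no  -b batch.txt bench@10.0.0.2"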

    ############################################################
    # client_sftp_string(batch_file)
    # generates a fully qualified sftp command string for the
    # client machine
    ############################################################
    def client_sftp_string(self, batch_file):
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file != None:
            sftp_string += " -b " + batch_file + " "

        if self.client_identity_file != None:
            sftp_string += " -i " + self.client_identity_file + " "

        return sftp_string + self.client_user + "@" + self.client_host
    ############################################################
    # End client_sftp_string
    ############################################################

    ############################################################
    # generate_url(url, port)
    # generates a fully qualified URL for accessing a test url
    ############################################################
    def generate_url(self, url, port):
        return self.server_host + ":" + str(port) + url
    ############################################################
    # End generate_url
    ############################################################
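
    # Illustrative example (hypothetical host): with server_host="10.0.0.1",
    #   generate_url("/json", 8080)  =>  "10.0.0.1:8080/json"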

    ############################################################
    # get_output_file(test_name, test_type)
    # returns the output file name for this test_name and
    # test_type: timestamp/test_type/test_name/raw
    ############################################################
    def get_output_file(self, test_name, test_type):
        return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
    ############################################################
    # End get_output_file
    ############################################################
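
    # Illustrative example (hypothetical values): with
    # result_directory="results/ec2" and timestamp="20140101120000",
    #   get_output_file("gemini", "json")
    # returns "results/ec2/20140101120000/json/gemini/raw"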

    ############################################################
    # output_file(test_name, test_type)
    # returns the output file for this test_name and test_type,
    # creating the parent directory if needed:
    # timestamp/test_type/test_name/raw
    ############################################################
    def output_file(self, test_name, test_type):
        path = self.get_output_file(test_name, test_type)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            pass
        return path
    ############################################################
    # End output_file
    ############################################################

    ############################################################
    # get_stats_file(test_name, test_type)
    # returns the stats file name for this test_name and
    # test_type: timestamp/test_type/test_name/stats
    ############################################################
    def get_stats_file(self, test_name, test_type):
        return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
    ############################################################
    # End get_stats_file
    ############################################################

    ############################################################
    # stats_file(test_name, test_type)
    # returns the stats file for this test_name and test_type,
    # creating the parent directory if needed:
    # timestamp/test_type/test_name/stats
    ############################################################
    def stats_file(self, test_name, test_type):
        path = self.get_stats_file(test_name, test_type)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            pass
        return path
    ############################################################
    # End stats_file
    ############################################################

    ############################################################
    # full_results_directory
    ############################################################
    def full_results_directory(self):
        path = os.path.join(self.result_directory, self.timestamp)
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path
    ############################################################
    # End full_results_directory
    ############################################################

    ############################################################
    # Latest intermediate results directory
    ############################################################
    def latest_results_directory(self):
        path = os.path.join(self.result_directory, "latest")
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path

    ############################################################
    # report_verify_results
    # Used by FrameworkTest to add verification details to our results
    #
    # TODO: Technically this is an IPC violation - we are accessing
    # the parent process' memory from the child process
    ############################################################
    def report_verify_results(self, framework, test, result):
        if framework.name not in self.results['verify'].keys():
            self.results['verify'][framework.name] = dict()
        self.results['verify'][framework.name][test] = result

    ############################################################
    # report_benchmark_results
    # Used by FrameworkTest to add benchmark data to our results
    #
    # TODO: Technically this is an IPC violation - we are accessing
    # the parent process' memory from the child process
    ############################################################
    def report_benchmark_results(self, framework, test, results):
        if test not in self.results['rawData'].keys():
            self.results['rawData'][test] = dict()

        # If results has a size from the parse, then it succeeded.
        if results:
            self.results['rawData'][test][framework.name] = results

            # This may already be set for single-tests
            if framework.name not in self.results['succeeded'][test]:
                self.results['succeeded'][test].append(framework.name)
        else:
            # This may already be set for single-tests
            if framework.name not in self.results['failed'][test]:
                self.results['failed'][test].append(framework.name)
    ############################################################
    # End report_benchmark_results
    ############################################################
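
    # For illustration (hypothetical framework name): after a successful "json"
    # run for a framework named "gemini", self.results would contain roughly:
    #   self.results['rawData']['json']['gemini'] = <parsed benchmark data>
    #   self.results['succeeded']['json'] = ['gemini']
    # and after a failed parse, instead:
    #   self.results['failed']['json'] = ['gemini']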

    ##########################################################################################
    # Private methods
    ##########################################################################################

    ############################################################
    # Gathers all the tests
    ############################################################
    @property
    def __gather_tests(self):
        tests = gather_tests(include=self.test,
                             exclude=self.exclude,
                             benchmarker=self)

        # If the tests have been interrupted somehow, then we want to resume them where we left
        # off, rather than starting from the beginning
        if os.path.isfile('current_benchmark.txt'):
            with open('current_benchmark.txt', 'r') as interrupted_benchmark:
                interrupt_bench = interrupted_benchmark.read().strip()
                for index, atest in enumerate(tests):
                    if atest.name == interrupt_bench:
                        tests = tests[index:]
                        break
        return tests
    ############################################################
    # End __gather_tests
    ############################################################
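
    # For illustration (hypothetical test names): if current_benchmark.txt
    # contains "framework-b" and the gathered list is
    #   [framework-a, framework-b, framework-c]
    # then __gather_tests resumes with [framework-b, framework-c].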

    ############################################################
    # Makes any necessary changes to the server that should be
    # made before running the tests. This involves setting kernel
    # settings to allow for more connections, or more file
    # descriptors
    #
    # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
    ############################################################
    def __setup_server(self):
        try:
            if os.name == 'nt':
                return True
            subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
            subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
            subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
            subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
        except subprocess.CalledProcessError:
            return False
    ############################################################
    # End __setup_server
    ############################################################

    ############################################################
    # Makes any necessary changes to the database machine that
    # should be made before running the tests. This is very
    # similar to the server setup, but may also include
    # database-specific changes.
    ############################################################
    def __setup_database(self):
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_database
    ############################################################

    ############################################################
    # Makes any necessary changes to the client machine that
    # should be made before running the tests. This is very
    # similar to the server setup, but may also include
    # client-specific changes.
    ############################################################
    def __setup_client(self):
        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_client
    ############################################################

    ############################################################
    # __run_tests
    #
    # 2013-10-02 ASB  Calls each test passed in tests to
    #                 __run_test in a separate process. Each
    #                 test is given a set amount of time, and if
    #                 it exceeds that time the child process (and
    #                 subsequently all of its child processes) is
    #                 killed. Uses the multiprocessing module.
    ############################################################
    def __run_tests(self, tests):
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s", __name__)

        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                if self.__run_test(test) != 0:
                    error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")

            # Setup a nice progressbar and ETA indicator
            widgets = [self.mode, ': ', progressbar.Percentage(),
                       ' ', progressbar.Bar(),
                       ' Rough ', progressbar.ETA()]
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                    with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                        benchmark_resume_file.write(test.name)
                    test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                    test_process.start()
                    test_process.join(self.run_test_timeout_seconds)
                    self.__load_results()  # Load intermediate result from child process
                    if test_process.is_alive():
                        logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                        self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()
                    test_process.join()
                    if test_process.exitcode != 0:
                        error_happened = True
            pbar.finish()

        if os.path.isfile('current_benchmark.txt'):
            os.remove('current_benchmark.txt')
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
    ############################################################
    # End __run_tests
    ############################################################

    ############################################################
    # __run_test
    # 2013-10-02 ASB  Previously __run_tests. This code now only
    #                 processes a single test.
    #
    # Runs a single test in its own process: starts the test's
    # framework, verifies its URLs, optionally benchmarks it,
    # and stops it, recording intermediate results along the way.
    ############################################################
    def __run_test(self, test):
        # Used to capture return values
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        try:
            os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
        except Exception:
            pass
        with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
             open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
                return exit_with_code(0)

            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)

            out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(), dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                return exit_with_code(1)
            out.flush()

            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()

            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                    p.communicate("""
                      sudo restart mysql
                      sudo restart mongodb
                      sudo service redis-server restart
                      sudo /etc/init.d/postgresql restart
                    """)
                    time.sleep(10)

                if self.__is_port_bound(test.port):
                    err.write(header("Error: Port %s is not available, attempting to recover" % test.port))
                    err.flush()
                    print "Error: Port %s is not available, attempting to recover" % test.port
                    self.__forciblyEndPortBoundProcesses(test.port, out, err)
                    if self.__is_port_bound(test.port):
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                        err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                        err.flush()
                        print "Error: Unable to recover port, cannot start test"
                        return exit_with_code(1)

                result = test.start(out, err)
                if result != 0:
                    test.stop(out, err)
                    time.sleep(5)
                    err.write("ERROR: Problem starting {name}\n".format(name=test.name))
                    err.write(header("Stopped %s" % test.name))
                    err.flush()
                    self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
                    return exit_with_code(1)

                logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
                time.sleep(self.sleep)

                ##########################
                # Verify URLs
                ##########################
                logging.info("Verifying framework URLs")
                passed_verify = test.verify_urls(out, err)
                out.flush()
                err.flush()

                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    logging.info("Benchmarking")
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(out, err)
                    out.flush()
                    err.flush()

                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                test.stop(out, err)
                out.flush()
                err.flush()
                time.sleep(5)

                if self.__is_port_bound(test.port):
                    err.write("Port %s was not freed. Attempting to free it.\n" % (test.port,))
                    err.flush()
                    self.__forciblyEndPortBoundProcesses(test.port, out, err)
                    time.sleep(5)
                    if self.__is_port_bound(test.port):
                        err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                        err.flush()
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                        return exit_with_code(1)

                out.write(header("Stopped %s" % test.name))
                out.flush()
                time.sleep(5)

                ##########################################################
                # Save results thus far into toolset/benchmark/latest.json
                ##########################################################
                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
                err.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=err)
                err.flush()
                try:
                    test.stop(out, err)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
                    err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=err)
                    err.flush()
                out.close()
                err.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                test.stop(out, err)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)

            out.close()
            err.close()
            return exit_with_code(0)
    ############################################################
    # End __run_test
    ############################################################

    ############################################################
    # __is_port_bound
    # Check if the requested port is available. If it
    # isn't available, then a previous test probably didn't
    # shutdown properly.
    ############################################################
    def __is_port_bound(self, port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Try to bind to all IP addresses, this port
            s.bind(("", port))
            # If we get here, we were able to bind successfully,
            # which means the port is free.
        except Exception:
            # If we get an exception, it might be because the port is still bound
            # which would be bad, or maybe it is a privileged port (<1024) and we
            # are not running as root, or maybe the server is gone, but sockets are
            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
            # connect.
            try:
                s.connect(("127.0.0.1", port))
                # If we get here, we were able to connect to something, which means
                # that the port is still bound.
                return True
            except Exception:
                # An exception means that we couldn't connect, so a server probably
                # isn't still running on the port.
                pass
        finally:
            s.close()

        return False
    ############################################################
    # End __is_port_bound
    ############################################################
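
    # For illustration (hypothetical port): if a previous test left a server
    # listening on port 8080, then
    #   self.__is_port_bound(8080)  =>  True   (bind fails, connect succeeds)
    # whereas on a free port the bind succeeds and the method returns False.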

    ############################################################
    # __forciblyEndPortBoundProcesses
    # Tries to kill whatever process is bound to test_port,
    # first with SIGTERM, then with SIGKILL (including children)
    ############################################################
    def __forciblyEndPortBoundProcesses(self, test_port, out, err):
        p = subprocess.Popen(['sudo', 'netstat', '-lnp'], stdout=subprocess.PIPE)
        (ns_out, ns_err) = p.communicate()
        for line in ns_out.splitlines():
            # Handles tcp, tcp6, udp, udp6
            if line.startswith('tcp') or line.startswith('udp'):
                splitline = line.split()
                port = int(splitline[3].split(':')[-1])
                pid = splitline[-1].split('/')[0]
                # Sometimes the last column is just a dash
                if pid == '-':
                    continue
                if port > 6000:
                    try:
                        # Never try to kill pid 0; bad form old chap.
                        if int(pid) == 0:
                            continue
                    except Exception:
                        # Trying to kill a non-number? Silly.
                        continue
                    ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
                    (out_6000, err_6000) = ps.communicate()
                    err.write(textwrap.dedent(
                        """
                        Port {port} should not be open. See the following lines for information
                        {netstat}
                        {ps}
                        """.format(port=port, netstat=line, ps=out_6000)))
                    err.flush()
                if port == test_port:
                    err.write(header("Error: Test port %s should not be open" % port, bottom=''))
                    try:
                        ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
                        # Store some info about this process
                        (out_15, err_15) = ps.communicate()
                        children = subprocess.Popen(['ps', '--ppid', pid, '-o', 'ppid'], stdout=subprocess.PIPE)
                        (out_children, err_children) = children.communicate()

                        err.write("  Sending SIGTERM to this process:\n  %s\n" % out_15)
                        err.write("  Also expecting these child processes to die:\n  %s\n" % out_children)

                        subprocess.check_output(['sudo', 'kill', pid])
                        # Sleep for 10 sec; kill can be finicky
                        time.sleep(10)

                        # Check that PID again
                        ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
                        (out_9, err_9) = ps.communicate()
                        if len(out_9.splitlines()) != 1:  # One line for the header row
                            err.write("  Process is still alive!\n")
                            err.write("  Sending SIGKILL to this process:\n  %s\n" % out_9)
                            subprocess.check_output(['sudo', 'kill', '-9', pid])
                        else:
                            err.write("  Process has been terminated\n")

                        # Ensure all children are dead
                        c_pids = [c_pid.strip() for c_pid in out_children.splitlines()[1:]]
                        for c_pid in c_pids:
                            ps = subprocess.Popen(['ps', 'p', c_pid], stdout=subprocess.PIPE)
                            (out_9, err_9) = ps.communicate()
                            if len(out_9.splitlines()) != 1:  # One line for the header row
                                err.write("  Child Process %s is still alive, sending SIGKILL\n" % c_pid)
                                # Kill the surviving child, not the parent pid
                                subprocess.check_output(['sudo', 'kill', '-9', c_pid])
                    except Exception as e:
                        err.write("  Error: Unknown exception %s\n" % e)
                    err.write(header("Done attempting to recover port %s" % port, top=''))
    ############################################################
    # End __forciblyEndPortBoundProcesses
    ############################################################

    ############################################################
    # __parse_results
    # Parses the raw data of the tests that were just run and
    # writes the aggregated results (along with commit and SLOC
    # counts) to results.json
    ############################################################
    def __parse_results(self, tests):
        # Run the method to get the commit count of each framework.
        self.__count_commits()
        # Call the method which counts the sloc for each framework
        self.__count_sloc()

        # Time to create parsed files
        # Aggregate JSON file
        with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
            f.write(json.dumps(self.results, indent=2))
    ############################################################
    # End __parse_results
    ############################################################

    #############################################################
    # __count_sloc
    #############################################################
    def __count_sloc(self):
        frameworks = gather_frameworks(include=self.test,
                                       exclude=self.exclude, benchmarker=self)

        jsonResult = {}
        for framework, testlist in frameworks.iteritems():
            if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
                logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
                continue

            # Unfortunately the source_code files use lines like
            # ./cpoll_cppsp/www/fortune_old instead of
            # ./www/fortune_old
            # so we have to back our working dir up one level
            wd = os.path.dirname(testlist[0].directory)

            try:
                command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

                # Find the last instance of the word 'code' in the yaml output. This should
                # be the line count for the sum of all listed files or just the line count
                # for the last file in the case where there's only one file listed.
                command = command + "| grep code | tail -1 | cut -d: -f 2"
                logging.debug("Running \"%s\" (cwd=%s)", command, wd)
                lineCount = subprocess.check_output(command, cwd=wd, shell=True)
                jsonResult[framework] = int(lineCount)
            except subprocess.CalledProcessError:
                continue
            except ValueError as ve:
                logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
        self.results['rawData']['slocCounts'] = jsonResult
    ############################################################
    # End __count_sloc
    ############################################################
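
    # For illustration (hypothetical cloc output): the --yaml summary typically
    # ends with a section such as
    #   SUM:
    #     blank: 20
    #     comment: 5
    #     code: 1234
    # so "| grep code | tail -1 | cut -d: -f 2" extracts " 1234", which
    # int() then parses into the line count stored in jsonResult.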

    ############################################################
    # __count_commits
    #
    ############################################################
    def __count_commits(self):
        frameworks = gather_frameworks(include=self.test,
                                       exclude=self.exclude, benchmarker=self)

        def count_commit(directory, framework, jsonResult):
            command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
            try:
                commitCount = subprocess.check_output(command, shell=True)
                jsonResult[framework] = int(commitCount)
            except subprocess.CalledProcessError:
                pass

        # Because git can be slow when run in large batches, this
        # calls git up to 4 times in parallel. Normal improvement is ~3-4x
        # in my trials, or ~100 seconds down to ~25
        # This is safe to parallelize as long as each thread only
        # accesses one key in the dictionary
        threads = []
        jsonResult = {}
        t1 = datetime.now()
        for framework, testlist in frameworks.iteritems():
            directory = testlist[0].directory
            # Pass framework explicitly so each thread writes to its own key,
            # rather than closing over the changing loop variable
            t = threading.Thread(target=count_commit, args=(directory, framework, jsonResult))
            t.start()
            threads.append(t)
            # Git has internal locks, full parallel will just cause contention
            # and slowness, so we rate-limit a bit
            if len(threads) >= 4:
                threads[0].join()
                threads.remove(threads[0])

        # Wait for remaining threads
        for t in threads:
            t.join()
        t2 = datetime.now()
        # print "Took %s seconds " % (t2 - t1).seconds

        self.results['rawData']['commitCounts'] = jsonResult
        self.commits = jsonResult
    ############################################################
    # End __count_commits
    ############################################################
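
    # For illustration (hypothetical directory layout): for a framework living
    # in frameworks/Java/gemini, count_commit runs roughly
    #   git rev-list HEAD -- frameworks/Java/gemini | sort -u | wc -l
    # and stores the resulting integer under jsonResult['gemini'].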

    ############################################################
    # __write_intermediate_results
    ############################################################
    def __write_intermediate_results(self, test_name, status_message):
        try:
            self.results["completed"][test_name] = status_message
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
                f.write(json.dumps(self.results, indent=2))
        except IOError:
            logging.error("Error writing results.json")
    ############################################################
    # End __write_intermediate_results
    ############################################################

    ############################################################
    # __load_results
    ############################################################
    def __load_results(self):
        try:
            with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
                self.results = json.load(f)
        except (ValueError, IOError):
            pass

    ############################################################
    # __finish
    ############################################################
    def __finish(self):
        tests = self.__gather_tests

        # Normally you wouldn't need the color prefix before each line, but
        # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
        # or stream flush, so we have to ensure that the color code is printed repeatedly
        prefix = Fore.CYAN
        for line in header("Verification Summary", top='=', bottom='').split('\n'):
            print prefix + line
        for test in tests:
            print prefix + "| Test: %s" % test.name
            if test.name in self.results['verify'].keys():
                for test_type, result in self.results['verify'][test.name].iteritems():
                    if result.upper() == "PASS":
                        color = Fore.GREEN
                    elif result.upper() == "WARN":
                        color = Fore.YELLOW
                    else:
                        color = Fore.RED
                    print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
            else:
                print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
        print prefix + header('', top='', bottom='=') + Style.RESET_ALL

        print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
        print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
    ############################################################
    # End __finish
    ############################################################

    ##########################################################################################
    # Constructor
    ##########################################################################################

    ############################################################
    # Initialize the benchmarker. The args are the arguments
    # parsed via argparse.
    ############################################################
    def __init__(self, args):
        # Map type strings to their objects
        types = dict()
        types['json'] = JsonTestType()
        types['db'] = DBTestType()
        types['query'] = QueryTestType()
        types['fortune'] = FortuneTestType()
        types['update'] = UpdateTestType()
        types['plaintext'] = PlaintextTestType()

        # Turn type into a map instead of a string
        if args['type'] == 'all':
            args['types'] = types
        else:
            args['types'] = {args['type']: types[args['type']]}
        del args['type']

        args['max_threads'] = args['threads']
        args['max_concurrency'] = max(args['concurrency_levels'])

        self.__dict__.update(args)
        # pprint(self.__dict__)

        self.start_time = time.time()
        self.run_test_timeout_seconds = 3600

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        self.latest_results_directory = self.latest_results_directory()

        if hasattr(self, 'parse') and self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # Load the latest data
        #self.latest = None
        #try:
        #  with open('toolset/benchmark/latest.json', 'r') as f:
        #    # Load json file into config object
        #    self.latest = json.load(f)
        #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
        #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
        #except IOError:
        #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
        #
        #self.results = None
        #try:
        #  if self.latest != None and self.name in self.latest.keys():
        #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
        #      # Load json file into config object
        #      self.results = json.load(f)
        #except IOError:
        #  pass

        self.results = None
        try:
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
                # Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results == None:
            self.results = dict()
            self.results['name'] = self.name
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_levels
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
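
        # Illustrative example (hypothetical values): with database_user="bench",
        # database_host="10.0.0.2", and database_identity_file="~/.ssh/bench.pem",
        # database_ssh_string becomes:
        #   ssh -T -o StrictHostKeyChecking=no bench@10.0.0.2 -i ~/.ssh/bench.pem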

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()
    ############################################################
    # End __init__
    ############################################################