from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks

import os
import json
import subprocess
import textwrap
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
from pprint import pprint
from multiprocessing import Process
from datetime import datetime

# Cross-platform colored text
from colorama import Fore, Back, Style

# Text-based progress indicators
import progressbar
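
# A minimal usage sketch (assuming an argparse-style dict of arguments with
# the keys consumed in __init__ below, e.g. 'type', 'mode', 'test',
# 'exclude', and the client/database host settings):
#
#   benchmarker = Benchmarker(vars(args))
#   benchmarker.run_list_tests()   # print every known test name
#   benchmarker.run()              # set up machines, verify, and benchmark
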
class Benchmarker:

  ##########################################################################################
  # Public methods
  ##########################################################################################

  ############################################################
  # Prints all the available tests
  ############################################################
  def run_list_tests(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      print test.name

    self.__finish()
  ############################################################
  # End run_list_tests
  ############################################################

  ############################################################
  # Prints the metadata for all the available tests
  ############################################################
  def run_list_test_metadata(self):
    all_tests = self.__gather_tests
    all_tests_json = json.dumps(map(lambda test: {
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    }, all_tests))

    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)

    self.__finish()
  ############################################################
  # End run_list_test_metadata
  ############################################################

  ############################################################
  # parse_timestamp
  # Re-parses the raw data for a given timestamp
  ############################################################
  def parse_timestamp(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      test.parse_all()

    self.__parse_results(all_tests)
    self.__finish()
  ############################################################
  # End parse_timestamp
  ############################################################

  ############################################################
  # Run the tests:
  # This process involves setting up the client/server machines
  # with any necessary changes, then going through each test,
  # running its setup script, verifying the URLs, and
  # running benchmarks against it.
  ############################################################
  def run(self):
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests

    ##########################
    # Setup client/server
    ##########################
    print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    ##########################
    # Run tests
    ##########################
    print header("Running Tests...", top='=', bottom='=')
    result = self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
      print header("Parsing Results ...", top='=', bottom='=')
      self.__parse_results(all_tests)

    self.__finish()
    return result
  ############################################################
  # End run
  ############################################################

  ############################################################
  # database_sftp_string(batch_file)
  # generates a fully qualified sftp command string for
  # connecting to the database machine
  ############################################################
  def database_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file is not None: sftp_string += " -b " + batch_file + " "

    if self.database_identity_file is not None:
      sftp_string += " -i " + self.database_identity_file + " "

    return sftp_string + self.database_user + "@" + self.database_host
  ############################################################
  # End database_sftp_string
  ############################################################
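  # With batch_file=None, no identity file, and hypothetical user/host
  # values, this produces a command string such as:
  #   sftp -oStrictHostKeyChecking=no bench-user@database-host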

  ############################################################
  # client_sftp_string(batch_file)
  # generates a fully qualified sftp command string for
  # connecting to the client machine
  ############################################################
  def client_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file is not None: sftp_string += " -b " + batch_file + " "

    if self.client_identity_file is not None:
      sftp_string += " -i " + self.client_identity_file + " "

    return sftp_string + self.client_user + "@" + self.client_host
  ############################################################
  # End client_sftp_string
  ############################################################

  ############################################################
  # generate_url(url, port)
  # generates a fully qualified URL for accessing a test url
  ############################################################
  def generate_url(self, url, port):
    return self.server_host + ":" + str(port) + url
  ############################################################
  # End generate_url
  ############################################################
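  # Example (hypothetical server_host of '10.0.0.1'):
  #   generate_url("/json", 8080)  =>  "10.0.0.1:8080/json"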

  ############################################################
  # get_output_file(test_name, test_type)
  # returns the output file name for this test_name and
  # test_type: timestamp/test_type/test_name/raw
  ############################################################
  def get_output_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  ############################################################
  # End get_output_file
  ############################################################

  ############################################################
  # output_file(test_name, test_type)
  # returns the output file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/raw
  ############################################################
  def output_file(self, test_name, test_type):
    path = self.get_output_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End output_file
  ############################################################

  ############################################################
  # get_stats_file(test_name, test_type)
  # returns the stats file name for this test_name and
  # test_type: timestamp/test_type/test_name/stats
  ############################################################
  def get_stats_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  ############################################################
  # End get_stats_file
  ############################################################

  ############################################################
  # stats_file(test_name, test_type)
  # returns the stats file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/stats
  ############################################################
  def stats_file(self, test_name, test_type):
    path = self.get_stats_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End stats_file
  ############################################################

  ############################################################
  # full_results_directory
  ############################################################
  def full_results_directory(self):
    path = os.path.join(self.result_directory, self.timestamp)
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path
  ############################################################
  # End full_results_directory
  ############################################################

  ############################################################
  # Latest intermediate results directory
  ############################################################
  def latest_results_directory(self):
    path = os.path.join(self.result_directory, "latest")
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path

  ############################################################
  # report_verify_results
  # Used by FrameworkTest to add verification details to our results
  #
  # TODO: Technically this is an IPC violation - we are accessing
  # the parent process' memory from the child process
  ############################################################
  def report_verify_results(self, framework, test, result):
    if framework.name not in self.results['verify'].keys():
      self.results['verify'][framework.name] = dict()
    self.results['verify'][framework.name][test] = result

  ############################################################
  # report_benchmark_results
  # Used by FrameworkTest to add benchmark data to our results
  #
  # TODO: Technically this is an IPC violation - we are accessing
  # the parent process' memory from the child process
  ############################################################
  def report_benchmark_results(self, framework, test, results):
    if test not in self.results['rawData'].keys():
      self.results['rawData'][test] = dict()

    # If results has a size from the parse, then it succeeded.
    if results:
      self.results['rawData'][test][framework.name] = results

      # This may already be set for single-tests
      if framework.name not in self.results['succeeded'][test]:
        self.results['succeeded'][test].append(framework.name)
    else:
      # This may already be set for single-tests
      if framework.name not in self.results['failed'][test]:
        self.results['failed'][test].append(framework.name)
  ############################################################
  # End report_benchmark_results
  ############################################################

  ##########################################################################################
  # Private methods
  ##########################################################################################

  ############################################################
  # Gathers all the tests
  ############################################################
  @property
  def __gather_tests(self):
    tests = gather_tests(include=self.test,
                         exclude=self.exclude,
                         benchmarker=self)

    # If the tests have been interrupted somehow, then we want to resume them where we left
    # off, rather than starting from the beginning
    if os.path.isfile('current_benchmark.txt'):
      with open('current_benchmark.txt', 'r') as interrupted_benchmark:
        interrupt_bench = interrupted_benchmark.read().strip()
        for index, atest in enumerate(tests):
          if atest.name == interrupt_bench:
            tests = tests[index:]
            break
    return tests
  ############################################################
  # End __gather_tests
  ############################################################

  ############################################################
  # Makes any necessary changes to the server that should be
  # made before running the tests. This involves setting kernel
  # settings to allow for more connections and more open file
  # descriptors.
  #
  # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  ############################################################
  def __setup_server(self):
    try:
      if os.name == 'nt':
        return True
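      # Intent of the tunings below (see the weighttp troubleshooting link
      # above): pin the CPU governor to 'performance', enlarge the SYN
      # backlog and accept queue, raise the open-file limit, relax
      # TIME_WAIT handling, and increase shared-memory limits.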
      subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
      subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".split(" "))
      subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".split(" "))
      subprocess.check_call("sudo -s ulimit -n 65535".split(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".split(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".split(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".split(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".split(" "))
    except subprocess.CalledProcessError:
      return False
  ############################################################
  # End __setup_server
  ############################################################

  ############################################################
  # Makes any necessary changes to the database machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include database-specific
  # changes.
  ############################################################
  def __setup_database(self):
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
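    # communicate() feeds the script below to the remote shell via ssh's
    # stdin (the Popen above runs self.database_ssh_string), so these
    # tunings are applied on the database machine rather than locally.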
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_database
  ############################################################

  ############################################################
  # Makes any necessary changes to the client machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include client-specific
  # changes.
  ############################################################
  def __setup_client(self):
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_client
  ############################################################

  ############################################################
  # __run_tests
  #
  # 2013-10-02 ASB  Calls each test passed in tests to
  #                 __run_test in a separate process. Each
  #                 test is given a set amount of time, and if
  #                 it exceeds that limit the child process (and
  #                 subsequently all of its child processes) is
  #                 killed. Uses the multiprocessing module.
  ############################################################
  def __run_tests(self, tests):
    if len(tests) == 0:
      return 0

    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s", __name__)

    error_happened = False
    if self.os.lower() == 'windows':
      logging.debug("Executing __run_tests on Windows")
      for test in tests:
        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        if self.__run_test(test) != 0:
          error_happened = True
    else:
      logging.debug("Executing __run_tests on Linux")

      # Setup a nice progressbar and ETA indicator
      widgets = [self.mode, ': ', progressbar.Percentage(),
                 ' ', progressbar.Bar(),
                 ' Rough ', progressbar.ETA()]
      pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
      pbar_test = 0

      # These features do not work on Windows
      for test in tests:
        pbar.update(pbar_test)
        pbar_test = pbar_test + 1
        if __name__ == 'benchmark.benchmarker':
          print header("Running Test: %s" % test.name)
          with open('current_benchmark.txt', 'w') as benchmark_resume_file:
            benchmark_resume_file.write(test.name)
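          # Run the test in a child process so that a hung framework can be
          # killed wholesale: join() waits at most run_test_timeout_seconds,
          # and anything still alive after that is terminated below.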
          test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
          test_process.start()
          test_process.join(self.run_test_timeout_seconds)
          self.__load_results()  # Load intermediate result from child process
          if test_process.is_alive():
            logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
            self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
            test_process.terminate()
            test_process.join()
          if test_process.exitcode != 0:
            error_happened = True
      pbar.finish()

    if os.path.isfile('current_benchmark.txt'):
      os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")

    if error_happened:
      return 1
    return 0
  ############################################################
  # End __run_tests
  ############################################################

  ############################################################
  # __run_test
  # 2013-10-02 ASB  Previously __run_tests. This code now only
  #                 processes a single test.
  #
  # Runs the given test: starts the framework, verifies its
  # URLs, benchmarks it if we are in benchmark mode, and then
  # stops it, recording results and errors along the way.
  ############################################################
  def __run_test(self, test):
    # Used to capture return values
    def exit_with_code(code):
      if self.os.lower() == 'windows':
        return code
      else:
        sys.exit(code)
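    # On Linux __run_test executes in a child process, so sys.exit(code)
    # becomes the exitcode that __run_tests inspects after join(); on
    # Windows the test runs in-process and we simply hand the code back.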
    try:
      os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
    except:
      pass
    with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
         open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
        out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
        return exit_with_code(0)

      # If the test is in the excludes list, we skip it
      if self.exclude is not None and test.name in self.exclude:
        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
        return exit_with_code(0)

      out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(), dbos=test.database_os.lower()))
      out.write("self.results['frameworks'] is not None: {val}\n".format(val=str(self.results['frameworks'] is not None)))
      out.write("test.name: {name}\n".format(name=str(test.name)))
      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
      if self.results['frameworks'] is not None and test.name in self.results['completed']:
        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
        return exit_with_code(1)
      out.flush()

      out.write(header("Beginning %s" % test.name, top='='))
      out.flush()

      ##########################
      # Start this test
      ##########################
      out.write(header("Starting %s" % test.name))
      out.flush()
      try:
        if test.requires_database():
          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
          p.communicate("""
            sudo restart mysql
            sudo restart mongodb
            sudo service redis-server restart
            sudo /etc/init.d/postgresql restart
          """)
          time.sleep(10)

        if self.__is_port_bound(test.port):
          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
          err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
          err.flush()
          return exit_with_code(1)

        result = test.start(out, err)
        if result != 0:
          test.stop(out, err)
          time.sleep(5)
          err.write("ERROR: Problem starting {name}\n".format(name=test.name))
          err.write(header("Stopped %s" % test.name))
          err.flush()
          self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
          return exit_with_code(1)

        logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
        time.sleep(self.sleep)

        ##########################
        # Verify URLs
        ##########################
        logging.info("Verifying framework URLs")
        passed_verify = test.verify_urls(out, err)
        out.flush()
        err.flush()

        ##########################
        # Benchmark this test
        ##########################
        if self.mode == "benchmark":
          logging.info("Benchmarking")
          out.write(header("Benchmarking %s" % test.name))
          out.flush()
          test.benchmark(out, err)
          out.flush()
          err.flush()

        ##########################
        # Stop this test
        ##########################
        out.write(header("Stopping %s" % test.name))
        out.flush()
        test.stop(out, err)
        out.flush()
        err.flush()
        time.sleep(5)

        if self.__is_port_bound(test.port):
          err.write("Port %s was not freed. Attempting to free it." % (test.port,))
          err.flush()
          self.__forciblyEndPortBoundProcesses(test.port, out, err)
          time.sleep(5)
          if self.__is_port_bound(test.port):
            err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
            err.flush()
            self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
            return exit_with_code(1)

        out.write(header("Stopped %s" % test.name))
        out.flush()
        time.sleep(5)

        ##########################################################
        # Save results thus far into toolset/benchmark/latest.json
        ##########################################################
        out.write(header("Saving results through %s" % test.name))
        out.flush()
        self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

        if self.mode == "verify" and not passed_verify:
          print "Failed verify!"
          return exit_with_code(1)
      except (OSError, IOError, subprocess.CalledProcessError) as e:
        self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
        err.write(header("Subprocess Error %s" % test.name))
        traceback.print_exc(file=err)
        err.flush()
        try:
          test.stop(out, err)
        except (subprocess.CalledProcessError) as e:
          self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
          err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
          traceback.print_exc(file=err)
          err.flush()
        out.close()
        err.close()
        return exit_with_code(1)
      # TODO - subprocess should not catch this exception!
      # Parent process should catch it and cleanup/exit
      except (KeyboardInterrupt) as e:
        test.stop(out, err)
        out.write(header("Cleaning up..."))
        out.flush()
        self.__finish()
        sys.exit(1)

      out.close()
      err.close()
      return exit_with_code(0)
  ############################################################
  # End __run_test
  ############################################################

  ############################################################
  # __is_port_bound
  # Check if the requested port is available. If it
  # isn't available, then a previous test probably didn't
  # shutdown properly.
  ############################################################
  def __is_port_bound(self, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      # Try to bind to all IP addresses, this port
      s.bind(("", port))
      # If we get here, we were able to bind successfully,
      # which means the port is free.
    except:
      # If we get an exception, it might be because the port is still bound
      # which would be bad, or maybe it is a privileged port (<1024) and we
      # are not running as root, or maybe the server is gone, but sockets are
      # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
      # connect.
      try:
        s.connect(("127.0.0.1", port))
        # If we get here, we were able to connect to something, which means
        # that the port is still bound.
        return True
      except:
        # An exception means that we couldn't connect, so a server probably
        # isn't still running on the port.
        pass
    finally:
      s.close()
    return False
  ############################################################
  # End __is_port_bound
  ############################################################

  def __forciblyEndPortBoundProcesses(self, test_port, out, err):
    p = subprocess.Popen(['sudo', 'netstat', '-lnp'], stdout=subprocess.PIPE)
    # Use fresh names for the pipe output so the out/err log files passed
    # in as parameters are not shadowed.
    ns_out, ns_err = p.communicate()
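    # Each 'tcp' line of netstat -lnp looks roughly like
    #   tcp  0  0 0.0.0.0:8080  0.0.0.0:*  LISTEN  1234/progname
    # so splitline[3] holds the local address:port and splitline[6]
    # holds pid/program for the listener.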
    for line in ns_out.splitlines():
      if 'tcp' in line:
        splitline = line.split()
        port = splitline[3].split(':')
        port = int(port[len(port) - 1].strip())

        if port > 6000:
          err.write(textwrap.dedent(
            """
            A port that shouldn't be open is open. See the following line for netstat output.
            {splitline}
            """.format(splitline=splitline)))
          err.flush()
        if port == test_port:
          try:
            pid = splitline[6].split('/')[0].strip()
            ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
            # Store some info about this process
            proc = ps.communicate()
            os.kill(int(pid), 15)
            # Sleep for 10 sec; kill can be finicky
            time.sleep(10)
            # Check that PID again
            ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
            dead = ps.communicate()
            # If the pid still shows up in the ps output, SIGTERM did not
            # stop the process, so escalate to SIGKILL.
            if pid in dead[0]:
              os.kill(int(pid), 9)
          except OSError:
            out.write(textwrap.dedent("""
              -----------------------------------------------------
              Error: Could not kill pid {pid}
              -----------------------------------------------------
              """.format(pid=str(pid))))
            # This is okay; likely we killed a parent that ended
            # up automatically killing this before we could.

  ############################################################
  # __parse_results
  # Parses the raw data collected by the tests and writes the
  # aggregated results.json file for this run.
  ############################################################
  def __parse_results(self, tests):
    # Run the method to get the commit count of each framework.
    self.__count_commits()
    # Call the method which counts the sloc for each framework
    self.__count_sloc()

    # Time to create parsed files
    # Aggregate JSON file
    with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
      f.write(json.dumps(self.results, indent=2))
  ############################################################
  # End __parse_results
  ############################################################

  #############################################################
  # __count_sloc
  #############################################################
  def __count_sloc(self):
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    jsonResult = {}
    for framework, testlist in frameworks.iteritems():
      if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
        logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
        continue

      # Unfortunately the source_code files use lines like
      # ./cpoll_cppsp/www/fortune_old instead of
      # ./www/fortune_old
      # so we have to back our working dir up one level
      wd = os.path.dirname(testlist[0].directory)

      try:
        command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

        # Find the last instance of the word 'code' in the yaml output. This should
        # be the line count for the sum of all listed files or just the line count
        # for the last file in the case where there's only one file listed.
        command = command + "| grep code | tail -1 | cut -d: -f 2"
        logging.debug("Running \"%s\" (cwd=%s)", command, wd)
        lineCount = subprocess.check_output(command, cwd=wd, shell=True)
        jsonResult[framework] = int(lineCount)
      except subprocess.CalledProcessError:
        continue
      except ValueError as ve:
        logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
    self.results['rawData']['slocCounts'] = jsonResult
  ############################################################
  # End __count_sloc
  ############################################################

  ############################################################
  # __count_commits
  #
  ############################################################
  def __count_commits(self):
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    # framework is passed in explicitly so that each thread writes to its
    # own key, rather than closing over the loop variable below.
    def count_commit(framework, directory, jsonResult):
      command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
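      # git rev-list prints one commit hash per line for commits touching
      # this directory; piping through sort -u | wc -l turns that into a
      # unique-commit count.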
      try:
        commitCount = subprocess.check_output(command, shell=True)
        jsonResult[framework] = int(commitCount)
      except subprocess.CalledProcessError:
        pass

    # Because git can be slow when run in large batches, this
    # calls git up to 4 times in parallel. Normal improvement is ~3-4x
    # in my trials, or ~100 seconds down to ~25
    # This is safe to parallelize as long as each thread only
    # accesses one key in the dictionary
    threads = []
    jsonResult = {}
    t1 = datetime.now()
    for framework, testlist in frameworks.iteritems():
      directory = testlist[0].directory
      t = threading.Thread(target=count_commit, args=(framework, directory, jsonResult))
      t.start()
      threads.append(t)
      # Git has internal locks, full parallel will just cause contention
      # and slowness, so we rate-limit a bit
      if len(threads) >= 4:
        threads[0].join()
        threads.remove(threads[0])

    # Wait for remaining threads
    for t in threads:
      t.join()
    t2 = datetime.now()
    # print "Took %s seconds " % (t2 - t1).seconds

    self.results['rawData']['commitCounts'] = jsonResult
    self.commits = jsonResult
  ############################################################
  # End __count_commits
  ############################################################

  ############################################################
  # __write_intermediate_results
  ############################################################
  def __write_intermediate_results(self, test_name, status_message):
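    # results.json in the latest results directory doubles as the hand-off
    # channel between the child test process and the parent: the child
    # writes it here, and the parent re-reads it in __load_results after
    # each join (see __run_tests).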
    try:
      self.results["completed"][test_name] = status_message
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
        f.write(json.dumps(self.results, indent=2))
    except (IOError):
      logging.error("Error writing results.json")
  ############################################################
  # End __write_intermediate_results
  ############################################################

  def __load_results(self):
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
        self.results = json.load(f)
    except (ValueError, IOError):
      pass

  ############################################################
  # __finish
  ############################################################
  def __finish(self):
    tests = self.__gather_tests

    # Normally you don't have to use Fore.BLUE before each line, but
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
      print prefix + line
    for test in tests:
      print prefix + "| Test: %s" % test.name
      if test.name in self.results['verify'].keys():
        for test_type, result in self.results['verify'][test.name].iteritems():
          if result.upper() == "PASS":
            color = Fore.GREEN
          elif result.upper() == "WARN":
            color = Fore.YELLOW
          else:
            color = Fore.RED
          print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
      else:
        print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
    print prefix + header('', top='', bottom='=') + Style.RESET_ALL

    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  ############################################################
  # End __finish
  ############################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################

  ############################################################
  # Initialize the benchmarker. The args are the arguments
  # parsed via argparse.
  ############################################################
  def __init__(self, args):
    # Map type strings to their objects
    types = dict()
    types['json'] = JsonTestType()
    types['db'] = DBTestType()
    types['query'] = QueryTestType()
    types['fortune'] = FortuneTestType()
    types['update'] = UpdateTestType()
    types['plaintext'] = PlaintextTestType()

    # Turn type into a map instead of a string
    if args['type'] == 'all':
      args['types'] = types
    else:
      args['types'] = { args['type'] : types[args['type']] }
    del args['type']
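    # For example, type 'db' becomes args['types'] == {'db': DBTestType()},
    # so single-type and all-type runs are handled uniformly from here on.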
    self.__dict__.update(args)
    # pprint(self.__dict__)

    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    # setup some additional variables
    if self.database_user is None: self.database_user = self.client_user
    if self.database_host is None: self.database_host = self.client_host
    if self.database_identity_file is None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()

    if self.parse is not None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass
    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        # Load json file into results object
        self.results = json.load(f)
    except IOError:
      logging.warn("results.json for test %s not found.", self.name)

    if self.results is None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_levels
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['verify'] = dict()
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file is not None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file is not None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()
  ############################################################
  # End __init__
  ############################################################