benchmarker.py

from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from utils import header
from utils import gather_tests
from utils import gather_frameworks

import os
import json
import subprocess
import textwrap  # used by __forciblyEndPortBoundProcesses
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading

from multiprocessing import Process
from datetime import datetime

# Cross-platform colored text
from colorama import Fore, Back, Style

# Text-based progress indicators
import progressbar

class Benchmarker:

    ##########################################################################################
    # Public methods
    ##########################################################################################

    ############################################################
    # Prints all the available tests
    ############################################################
    def run_list_tests(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            print test.name

        self.__finish()
    ############################################################
    # End run_list_tests
    ############################################################

    ############################################################
    # Prints the metadata for all the available tests
    ############################################################
    def run_list_test_metadata(self):
        all_tests = self.__gather_tests
        all_tests_json = json.dumps(map(lambda test: {
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        }, all_tests))

        with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
            f.write(all_tests_json)

        self.__finish()
    ############################################################
    # End run_list_test_metadata
    ############################################################

    ############################################################
    # parse_timestamp
    # Re-parses the raw data for a given timestamp
    ############################################################
    def parse_timestamp(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            test.parse_all()

        self.__parse_results(all_tests)
        self.__finish()
    ############################################################
    # End parse_timestamp
    ############################################################

    ############################################################
    # Run the tests:
    # This process involves setting up the client/server machines
    # with any necessary changes, then going through each test,
    # running its setup script, verifying the URLs, and
    # running benchmarks against them.
    ############################################################
    def run(self):
        ##########################
        # Get a list of all known
        # tests that we can run.
        ##########################
        all_tests = self.__gather_tests

        ##########################
        # Setup client/server
        ##########################
        print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
        self.__setup_server()
        self.__setup_database()
        self.__setup_client()

        ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
        #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
        #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

        ##########################
        # Run tests
        ##########################
        print header("Running Tests...", top='=', bottom='=')
        result = self.__run_tests(all_tests)

        ##########################
        # Parse results
        ##########################
        if self.mode == "benchmark":
            print header("Parsing Results ...", top='=', bottom='=')
            self.__parse_results(all_tests)

        self.__finish()
        return result
    ############################################################
    # End run
    ############################################################

    ############################################################
    # database_sftp_string(batch_file)
    # generates a fully qualified sftp command string for the
    # database machine
    ############################################################
    def database_sftp_string(self, batch_file):
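        # Example result (hypothetical values):
        #   "sftp -oStrictHostKeyChecking=no  -b batch.txt  -i ~/.ssh/id_rsa bob@db-host"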
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file is not None: sftp_string += " -b " + batch_file + " "

        if self.database_identity_file is not None:
            sftp_string += " -i " + self.database_identity_file + " "

        return sftp_string + self.database_user + "@" + self.database_host
    ############################################################
    # End database_sftp_string
    ############################################################

    ############################################################
    # client_sftp_string(batch_file)
    # generates a fully qualified sftp command string for the
    # client machine
    ############################################################
    def client_sftp_string(self, batch_file):
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file is not None: sftp_string += " -b " + batch_file + " "

        if self.client_identity_file is not None:
            sftp_string += " -i " + self.client_identity_file + " "

        return sftp_string + self.client_user + "@" + self.client_host
    ############################################################
    # End client_sftp_string
    ############################################################

    ############################################################
    # generate_url(url, port)
    # generates a fully qualified URL for accessing a test url
    ############################################################
    def generate_url(self, url, port):
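        # e.g. generate_url("/json", 8080) -> "my-server:8080/json"
        # ("my-server" standing in for whatever self.server_host is)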
        return self.server_host + ":" + str(port) + url
    ############################################################
    # End generate_url
    ############################################################

    ############################################################
    # get_output_file(test_name, test_type)
    # returns the output file name for this test_name and
    # test_type, i.e. timestamp/test_type/test_name/raw
    ############################################################
    def get_output_file(self, test_name, test_type):
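        # e.g. results/my-run/20140101000000/json/some-framework/raw
        # ("my-run" and "some-framework" are hypothetical names)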
        return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
    ############################################################
    # End get_output_file
    ############################################################

    ############################################################
    # output_file(test_name, test_type)
    # returns the output file for this test_name and test_type
    # timestamp/test_type/test_name/raw
    ############################################################
    def output_file(self, test_name, test_type):
        path = self.get_output_file(test_name, test_type)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            pass
        return path
    ############################################################
    # End output_file
    ############################################################

    ############################################################
    # get_stats_file(test_name, test_type)
    # returns the stats file name for this test_name and
    # test_type, i.e. timestamp/test_type/test_name/stats
    ############################################################
    def get_stats_file(self, test_name, test_type):
        return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
    ############################################################
    # End get_stats_file
    ############################################################

    ############################################################
    # stats_file(test_name, test_type)
    # returns the stats file for this test_name and test_type
    # timestamp/test_type/test_name/stats
    ############################################################
    def stats_file(self, test_name, test_type):
        path = self.get_stats_file(test_name, test_type)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            pass
        return path
    ############################################################
    # End stats_file
    ############################################################

    ############################################################
    # full_results_directory
    ############################################################
    def full_results_directory(self):
        path = os.path.join(self.result_directory, self.timestamp)
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path
    ############################################################
    # End full_results_directory
    ############################################################

    ############################################################
    # Latest intermediate results directory
    ############################################################
    def latest_results_directory(self):
        path = os.path.join(self.result_directory, "latest")
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path

    ############################################################
    # report_verify_results
    # Used by FrameworkTest to add verification details to our results
    #
    # TODO: Technically this is an IPC violation - we are accessing
    # the parent process' memory from the child process
    ############################################################
    def report_verify_results(self, framework, test, result):
        if framework.name not in self.results['verify'].keys():
            self.results['verify'][framework.name] = dict()
        self.results['verify'][framework.name][test] = result

    ############################################################
    # report_benchmark_results
    # Used by FrameworkTest to add benchmark data to our results
    #
    # TODO: Technically this is an IPC violation - we are accessing
    # the parent process' memory from the child process
    ############################################################
    def report_benchmark_results(self, framework, test, results):
        if test not in self.results['rawData'].keys():
            self.results['rawData'][test] = dict()

        # If the parse yielded any results, the benchmark succeeded.
        if results:
            self.results['rawData'][test][framework.name] = results
            # This may already be set for single-tests
            if framework.name not in self.results['succeeded'][test]:
                self.results['succeeded'][test].append(framework.name)
        else:
            # This may already be set for single-tests
            if framework.name not in self.results['failed'][test]:
                self.results['failed'][test].append(framework.name)
    ############################################################
    # End report_benchmark_results
    ############################################################

    ##########################################################################################
    # Private methods
    ##########################################################################################

    ############################################################
    # Gathers all the tests
    ############################################################
    @property
    def __gather_tests(self):
        tests = gather_tests(include=self.test,
                             exclude=self.exclude,
                             benchmarker=self)

        # If the tests have been interrupted somehow, then we want to resume them where we left
        # off, rather than starting from the beginning
        if os.path.isfile('current_benchmark.txt'):
            with open('current_benchmark.txt', 'r') as interrupted_benchmark:
                interrupt_bench = interrupted_benchmark.read().strip()
                for index, atest in enumerate(tests):
                    if atest.name == interrupt_bench:
                        tests = tests[index:]
                        break
        return tests
    ############################################################
    # End __gather_tests
    ############################################################

    ############################################################
    # Makes any necessary changes to the server that should be
    # made before running the tests. This involves setting kernel
    # settings to allow for more connections, or more file
    # descriptors
    #
    # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
    ############################################################
    def __setup_server(self):
        try:
            if os.name == 'nt':
                return True
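            # Pin CPUs to the "performance" governor, raise the connection
            # backlog and file-descriptor limits, allow TIME_WAIT sockets
            # to be reused, and grow the shared-memory limits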
            subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
            subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
            subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
            subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
        except subprocess.CalledProcessError:
            return False
    ############################################################
    # End __setup_server
    ############################################################

    ############################################################
    # Makes any necessary changes to the database machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include database specific
    # changes.
    ############################################################
    def __setup_database(self):
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_database
    ############################################################

    ############################################################
    # Makes any necessary changes to the client machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include client specific
    # changes.
    ############################################################
    def __setup_client(self):
        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_client
    ############################################################

    ############################################################
    # __run_tests
    #
    # 2013-10-02 ASB  Calls __run_test in a separate process for
    #                 each test passed in. Each test is given a
    #                 set amount of time, and if it exceeds that
    #                 time the child process (and subsequently
    #                 all of its child processes) is killed.
    #                 Uses the multiprocessing module.
    ############################################################
    def __run_tests(self, tests):
        if len(tests) == 0:
            return 0

        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s", __name__)

        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                if self.__run_test(test) != 0:
                    error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")
            # Setup a nice progressbar and ETA indicator
            widgets = [self.mode, ': ', progressbar.Percentage(),
                       ' ', progressbar.Bar(),
                       ' Rough ', progressbar.ETA()]
            pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
            pbar_test = 0

            # These features do not work on Windows
            for test in tests:
                pbar.update(pbar_test)
                pbar_test = pbar_test + 1
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                test_process.start()
                test_process.join(self.run_test_timeout_seconds)
                self.__load_results()  # Load intermediate result from child process
                if test_process.is_alive():
                    logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                    self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
                    test_process.terminate()
                    test_process.join()
                if test_process.exitcode != 0:
                    error_happened = True
            pbar.finish()

        if os.path.isfile('current_benchmark.txt'):
            os.remove('current_benchmark.txt')
        logging.debug("End __run_tests.")

        if error_happened:
            return 1
        return 0
    ############################################################
    # End __run_tests
    ############################################################

    ############################################################
    # __run_test
    # 2013-10-02 ASB  Previously __run_tests. This code now only
    #                 processes a single test.
    #
    # Runs a single test: starts the test's server, verifies its
    # URLs, benchmarks it (in benchmark mode), and stops it again.
    ############################################################
    def __run_test(self, test):
        # Used to capture return values
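        # On Windows the test runs inside this process, so we return the
        # code; on Linux each test runs in a separate child process, so we
        # exit with it instead (see __run_tests above).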
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        try:
            os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
        except:
            pass
        with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
             open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
                return exit_with_code(0)

            # If the test is in the excludes list, we skip it
            if self.exclude is not None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)

            # If the test does not contain an implementation of the current test-type, skip it
            if self.type != 'all' and not test.contains_type(self.type):
                out.write("Test {name} does not contain an implementation of the {type} test-type. Skipping.\n".format(name=test.name, type=self.type))
                return exit_with_code(0)

            out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(), dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                return exit_with_code(1)

            out.flush()
            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()

            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                    p.communicate("""
                      sudo restart mysql
                      sudo restart mongodb
                      sudo service redis-server restart
                      sudo /etc/init.d/postgresql restart
                    """)
                    time.sleep(10)

                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)

                result = test.start(out, err)
                if result != 0:
                    test.stop(out, err)
                    time.sleep(5)
                    err.write("ERROR: Problem starting {name}\n".format(name=test.name))
                    err.write(header("Stopped %s" % test.name))
                    err.flush()
                    self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
                    return exit_with_code(1)

                time.sleep(self.sleep)

                ##########################
                # Verify URLs
                ##########################
                passed_verify = test.verify_urls(out, err)
                out.flush()
                err.flush()

                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(out, err)
                    out.flush()
                    err.flush()

                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                test.stop(out, err)
                out.flush()
                err.flush()
                time.sleep(5)

                if self.__is_port_bound(test.port):
                    err.write("Port %s was not freed. Attempting to free it.\n" % (test.port,))
                    err.flush()
                    self.__forciblyEndPortBoundProcesses(test.port, out, err)
                    time.sleep(5)
                    if self.__is_port_bound(test.port):
                        err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                        err.flush()
                        self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                        return exit_with_code(1)

                out.write(header("Stopped %s" % test.name))
                out.flush()
                time.sleep(5)

                ##########################################################
                # Save results thus far into the latest results directory
                ##########################################################
                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))

                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
                err.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=err)
                err.flush()
                try:
                    test.stop(out, err)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
                    err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=err)
                    err.flush()
                out.close()
                err.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                test.stop(out, err)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)

            out.close()
            err.close()
            return exit_with_code(0)
    ############################################################
    # End __run_test
    ############################################################

    ############################################################
    # __is_port_bound
    # Check if the requested port is available. If it
    # isn't available, then a previous test probably didn't
    # shutdown properly.
    ############################################################
    def __is_port_bound(self, port):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Try to bind to all IP addresses, this port
            s.bind(("", port))
            # If we get here, we were able to bind successfully,
            # which means the port is free.
        except:
            # If we get an exception, it might be because the port is still bound
            # which would be bad, or maybe it is a privileged port (<1024) and we
            # are not running as root, or maybe the server is gone, but sockets are
            # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
            # connect.
            try:
                s.connect(("127.0.0.1", port))
                # If we get here, we were able to connect to something, which means
                # that the port is still bound.
                return True
            except:
                # An exception means that we couldn't connect, so a server probably
                # isn't still running on the port.
                pass
        finally:
            s.close()
        return False
    ############################################################
    # End __is_port_bound
    ############################################################

    def __forciblyEndPortBoundProcesses(self, test_port, out, err):
        p = subprocess.Popen(['sudo', 'netstat', '-lnp'], stdout=subprocess.PIPE)
        # Name the pipe output ns_out so we don't shadow the out/err
        # log handles passed into this method
        ns_out, _ = p.communicate()
        for line in ns_out.splitlines():
            if 'tcp' in line:
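                # netstat -lnp columns: Proto, Recv-Q, Send-Q, Local Address,
                # Foreign Address, State, PID/Program name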
                splitline = line.split()
                # The port is the last element of the Local Address column
                port = int(splitline[3].split(':')[-1].strip())

                if port > 6000:
                    err.write(textwrap.dedent(
                        """
                        A port that shouldn't be open is open. See the following line for netstat output.
                        {splitline}
                        """.format(splitline=splitline)))
                    err.flush()
                if port == test_port:
                    try:
                        pid = splitline[6].split('/')[0].strip()
                        ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
                        # Store some info about this process
                        proc = ps.communicate()
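                        # Signal 15 is SIGTERM: ask the process to shut down
                        # cleanly before escalating to SIGKILL below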
                        os.kill(int(pid), 15)
                        # Sleep for 10 sec; kill can be finicky
                        time.sleep(10)
                        # Check that PID again
                        ps = subprocess.Popen(['ps', 'p', pid], stdout=subprocess.PIPE)
                        dead = ps.communicate()
                        # If the pid still shows up in the ps output, the
                        # process survived SIGTERM; send SIGKILL (9)
                        if pid in dead[0]:
                            os.kill(int(pid), 9)
                    except OSError:
                        out.write(textwrap.dedent("""
                          -----------------------------------------------------
                            Error: Could not kill pid {pid}
                          -----------------------------------------------------
                          """.format(pid=str(pid))))
                        # This is okay; likely we killed a parent that ended
                        # up automatically killing this before we could.
    ############################################################
    # End __forciblyEndPortBoundProcesses
    ############################################################

    ############################################################
    # __parse_results
    # Parses the raw data for this run: counts commits and
    # source lines of code for each framework, then writes the
    # aggregate results.json file.
    ############################################################
    def __parse_results(self, tests):
        # Run the method to get the commit count of each framework.
        self.__count_commits()
        # Call the method which counts the sloc for each framework
        self.__count_sloc()

        # Time to create parsed files
        # Aggregate JSON file
        with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
            f.write(json.dumps(self.results, indent=2))
    ############################################################
    # End __parse_results
    ############################################################

    #############################################################
    # __count_sloc
    #############################################################
    def __count_sloc(self):
        frameworks = gather_frameworks(include=self.test,
                                       exclude=self.exclude, benchmarker=self)

        jsonResult = {}
        for framework, testlist in frameworks.iteritems():
            # Unfortunately the source_code files use lines like
            # ./cpoll_cppsp/www/fortune_old instead of
            # ./www/fortune_old
            # so we have to back our working dir up one level
            wd = os.path.dirname(testlist[0].directory)

            try:
                command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

                # Find the last instance of the word 'code' in the yaml output. This should
                # be the line count for the sum of all listed files or just the line count
                # for the last file in the case where there's only one file listed.
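                # (cloc's YAML output reports counts on lines like "  code: 1234";
                # the grep/tail/cut pipeline below pulls out the final such count)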
                command = command + "| grep code | tail -1 | cut -d: -f 2"
                lineCount = subprocess.check_output(command, cwd=wd, shell=True)
                jsonResult[framework] = int(lineCount)
            except subprocess.CalledProcessError:
                continue
        self.results['rawData']['slocCounts'] = jsonResult
    ############################################################
    # End __count_sloc
    ############################################################

    ############################################################
    # __count_commits
    #
    ############################################################
    def __count_commits(self):
        frameworks = gather_frameworks(include=self.test,
                                       exclude=self.exclude, benchmarker=self)

        # Note: the framework name is passed in explicitly so that each
        # thread writes to its own key; reading the loop variable from the
        # enclosing scope would race with the loop advancing.
        def count_commit(directory, framework, jsonResult):
            command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
            try:
                commitCount = subprocess.check_output(command, shell=True)
                jsonResult[framework] = int(commitCount)
            except subprocess.CalledProcessError:
                pass

        # Because git can be slow when run in large batches, this
        # calls git up to 4 times in parallel. Normal improvement is ~3-4x
        # in my trials, or ~100 seconds down to ~25
        # This is safe to parallelize as long as each thread only
        # accesses one key in the dictionary
        threads = []
        jsonResult = {}
        t1 = datetime.now()
        for framework, testlist in frameworks.iteritems():
            directory = testlist[0].directory
            t = threading.Thread(target=count_commit, args=(directory, framework, jsonResult))
            t.start()
            threads.append(t)
            # Git has internal locks, full parallel will just cause contention
            # and slowness, so we rate-limit a bit
            if len(threads) >= 4:
                threads[0].join()
                threads.remove(threads[0])

        # Wait for remaining threads
        for t in threads:
            t.join()
        t2 = datetime.now()
        # print "Took %s seconds " % (t2 - t1).seconds

        self.results['rawData']['commitCounts'] = jsonResult
        self.commits = jsonResult
    ############################################################
    # End __count_commits
    ############################################################

    ############################################################
    # __write_intermediate_results
    ############################################################
    def __write_intermediate_results(self, test_name, status_message):
        try:
            self.results["completed"][test_name] = status_message
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
                f.write(json.dumps(self.results, indent=2))
        except (IOError):
            logging.error("Error writing results.json")
    ############################################################
    # End __write_intermediate_results
    ############################################################

    def __load_results(self):
        try:
            with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
                self.results = json.load(f)
        except (ValueError, IOError):
            pass

    ############################################################
    # __finish
    ############################################################
    def __finish(self):
        tests = self.__gather_tests

        # Normally you don't have to print the color code before each line, but
        # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
        # or stream flush, so we have to ensure that the color code is printed repeatedly
        prefix = Fore.CYAN
        for line in header("Verification Summary", top='=', bottom='').split('\n'):
            print prefix + line
        for test in tests:
            print prefix + "| Test: %s" % test.name
            if test.name in self.results['verify'].keys():
                for test_type, result in self.results['verify'][test.name].iteritems():
                    if result.upper() == "PASS":
                        color = Fore.GREEN
                    elif result.upper() == "WARN":
                        color = Fore.YELLOW
                    else:
                        color = Fore.RED
                    print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
            else:
                print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
        print prefix + header('', top='', bottom='=') + Style.RESET_ALL

        print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
        print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
    ############################################################
    # End __finish
    ############################################################

    ##########################################################################################
    # Constructor
    ##########################################################################################

    ############################################################
    # Initialize the benchmarker. The args are the arguments
    # parsed via argparser.
    ############################################################
    def __init__(self, args):
        self.__dict__.update(args)
        self.start_time = time.time()
        self.run_test_timeout_seconds = 3600

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)

        # setup some additional variables
        if self.database_user is None: self.database_user = self.client_user
        if self.database_host is None: self.database_host = self.client_host
        if self.database_identity_file is None: self.database_identity_file = self.client_identity_file

        # Remember root directory
        self.fwroot = setup_util.get_fwroot()

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        # Note: this replaces the bound method with the path string it
        # returns; later code reads latest_results_directory as an attribute
        self.latest_results_directory = self.latest_results_directory()

        if self.parse is not None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # Setup the concurrency levels array. This array goes from
        # starting_concurrency to max concurrency, doubling each time
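        # e.g. (hypothetical values) starting_concurrency=8, max_concurrency=256
        # yields [8, 16, 32, 64, 128, 256]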
        self.concurrency_levels = []
        concurrency = self.starting_concurrency
        while concurrency <= self.max_concurrency:
            self.concurrency_levels.append(concurrency)
            concurrency = concurrency * 2

        # Setup query interval array
        # starts at 1, and goes up to max_queries, using the query_interval
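        # e.g. (hypothetical values) query_interval=5, max_queries=20
        # yields [1, 5, 10, 15, 20]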
        self.query_intervals = []
        queries = 1
        while queries <= self.max_queries:
            self.query_intervals.append(queries)
            if queries == 1:
                queries = 0
            queries = queries + self.query_interval

        # Load the latest data
        #self.latest = None
        #try:
        #  with open('toolset/benchmark/latest.json', 'r') as f:
        #    # Load json file into config object
        #    self.latest = json.load(f)
        #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
        #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
        #except IOError:
        #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
        #
        #self.results = None
        #try:
        #  if self.latest != None and self.name in self.latest.keys():
        #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
        #      # Load json file into config object
        #      self.results = json.load(f)
        #except IOError:
        #  pass

        self.results = None
        try:
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
                # Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results is None:
            self.results = dict()
            self.results['name'] = self.name
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_intervals
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
            self.results['succeeded'] = dict()
            self.results['succeeded']['json'] = []
            self.results['succeeded']['db'] = []
            self.results['succeeded']['query'] = []
            self.results['succeeded']['fortune'] = []
            self.results['succeeded']['update'] = []
            self.results['succeeded']['plaintext'] = []
            self.results['failed'] = dict()
            self.results['failed']['json'] = []
            self.results['failed']['db'] = []
            self.results['failed']['query'] = []
            self.results['failed']['fortune'] = []
            self.results['failed']['update'] = []
            self.results['failed']['plaintext'] = []
            self.results['verify'] = dict()
        else:
            #for x in self.__gather_tests():
            #  if x.name not in self.results['frameworks']:
            #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
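        # e.g. (hypothetical values):
        #   "ssh -T -o StrictHostKeyChecking=no bob@db-host -i ~/.ssh/id_rsa"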
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
        if self.database_identity_file is not None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
        if self.client_identity_file is not None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

        if self.install is not None:
            install = Installer(self, self.install_strategy)
            install.install_software()
    ############################################################
    # End __init__
    ############################################################