benchmarker.py 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953
  1. from setup.linux.installer import Installer
  2. from setup.linux import setup_util
  3. from benchmark import framework_test
  4. from utils import header
  5. from utils import gather_tests
  6. import os
  7. import json
  8. import subprocess
  9. import traceback
  10. import time
  11. import pprint
  12. import csv
  13. import sys
  14. import logging
  15. import socket
  16. import glob
  17. from multiprocessing import Process
  18. from datetime import datetime
  19. class Benchmarker:
  20. ##########################################################################################
  21. # Public methods
  22. ##########################################################################################
  23. ############################################################
  24. # Prints all the available tests
  25. ############################################################
  26. def run_list_tests(self):
  27. all_tests = self.__gather_tests
  28. for test in all_tests:
  29. print test.name
  30. self.__finish()
  31. ############################################################
  32. # End run_list_tests
  33. ############################################################
  34. ############################################################
  35. # Prints the metadata for all the available tests
  36. ############################################################
  37. def run_list_test_metadata(self):
  38. all_tests = self.__gather_tests
  39. all_tests_json = json.dumps(map(lambda test: {
  40. "name": test.name,
  41. "approach": test.approach,
  42. "classification": test.classification,
  43. "database": test.database,
  44. "framework": test.framework,
  45. "language": test.language,
  46. "orm": test.orm,
  47. "platform": test.platform,
  48. "webserver": test.webserver,
  49. "os": test.os,
  50. "database_os": test.database_os,
  51. "display_name": test.display_name,
  52. "notes": test.notes,
  53. "versus": test.versus
  54. }, all_tests))
  55. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  56. f.write(all_tests_json)
  57. self.__finish()
  58. ############################################################
  59. # End run_list_test_metadata
  60. ############################################################
  61. ############################################################
  62. # parse_timestamp
  63. # Re-parses the raw data for a given timestamp
  64. ############################################################
  65. def parse_timestamp(self):
  66. all_tests = self.__gather_tests
  67. for test in all_tests:
  68. test.parse_all()
  69. self.__parse_results(all_tests)
  70. self.__finish()
  71. ############################################################
  72. # End parse_timestamp
  73. ############################################################
  74. ############################################################
  75. # Run the tests:
  76. # This process involves setting up the client/server machines
  77. # with any necessary change. Then going through each test,
  78. # running their setup script, verifying the URLs, and
  79. # running benchmarks against them.
  80. ############################################################
    def run(self):
        """Top-level entry point: prepare all three machines, run every
        gathered test, and (in benchmark mode) parse the results.

        Returns the exit status of __run_tests (0 on success, 1 if any
        test failed).
        """
        ##########################
        # Get a list of all known
        # tests that we can run.
        ##########################
        all_tests = self.__gather_tests
        ##########################
        # Setup client/server
        ##########################
        print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
        self.__setup_server()
        self.__setup_database()
        self.__setup_client()
        ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
        #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
        # raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
        ##########################
        # Run tests
        ##########################
        print header("Running Tests...", top='=', bottom='=')
        result = self.__run_tests(all_tests)
        ##########################
        # Parse results
        ##########################
        # Only benchmark mode produces timing data worth aggregating;
        # verify mode skips straight to cleanup.
        if self.mode == "benchmark":
            print header("Parsing Results ...", top='=', bottom='=')
            self.__parse_results(all_tests)
        self.__finish()
        return result
  110. ############################################################
  111. # End run
  112. ############################################################
  113. ############################################################
  114. # database_sftp_string(batch_file)
  115. # generates a fully qualified URL for sftp to database
  116. ############################################################
  117. def database_sftp_string(self, batch_file):
  118. sftp_string = "sftp -oStrictHostKeyChecking=no "
  119. if batch_file != None: sftp_string += " -b " + batch_file + " "
  120. if self.database_identity_file != None:
  121. sftp_string += " -i " + self.database_identity_file + " "
  122. return sftp_string + self.database_user + "@" + self.database_host
  123. ############################################################
  124. # End database_sftp_string
  125. ############################################################
  126. ############################################################
  127. # client_sftp_string(batch_file)
  128. # generates a fully qualified URL for sftp to client
  129. ############################################################
  130. def client_sftp_string(self, batch_file):
  131. sftp_string = "sftp -oStrictHostKeyChecking=no "
  132. if batch_file != None: sftp_string += " -b " + batch_file + " "
  133. if self.client_identity_file != None:
  134. sftp_string += " -i " + self.client_identity_file + " "
  135. return sftp_string + self.client_user + "@" + self.client_host
  136. ############################################################
  137. # End client_sftp_string
  138. ############################################################
  139. ############################################################
  140. # generate_url(url, port)
  141. # generates a fully qualified URL for accessing a test url
  142. ############################################################
  143. def generate_url(self, url, port):
  144. return self.server_host + ":" + str(port) + url
  145. ############################################################
  146. # End generate_url
  147. ############################################################
  148. ############################################################
  149. # get_output_file(test_name, test_type)
  150. # returns the output file name for this test_name and
  151. # test_type timestamp/test_type/test_name/raw
  152. ############################################################
  153. def get_output_file(self, test_name, test_type):
  154. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  155. ############################################################
  156. # End get_output_file
  157. ############################################################
  158. ############################################################
  159. # output_file(test_name, test_type)
  160. # returns the output file for this test_name and test_type
  161. # timestamp/test_type/test_name/raw
  162. ############################################################
  163. def output_file(self, test_name, test_type):
  164. path = self.get_output_file(test_name, test_type)
  165. try:
  166. os.makedirs(os.path.dirname(path))
  167. except OSError:
  168. pass
  169. return path
  170. ############################################################
  171. # End output_file
  172. ############################################################
  173. ############################################################
  174. # get_warning_file(test_name, test_type)
  175. # returns the output file name for this test_name and
  176. # test_type timestamp/test_type/test_name/raw
  177. ############################################################
  178. def get_warning_file(self, test_name, test_type):
  179. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
  180. ############################################################
  181. # End get_warning_file
  182. ############################################################
  183. ############################################################
  184. # warning_file(test_name, test_type)
  185. # returns the warning file for this test_name and test_type
  186. # timestamp/test_type/test_name/raw
  187. ############################################################
  188. def warning_file(self, test_name, test_type):
  189. path = self.get_warning_file(test_name, test_type)
  190. try:
  191. os.makedirs(os.path.dirname(path))
  192. except OSError:
  193. pass
  194. return path
  195. ############################################################
  196. # End warning_file
  197. ############################################################
  198. ############################################################
  199. # get_stats_file(test_name, test_type)
  200. # returns the stats file name for this test_name and
  201. # test_type timestamp/test_type/test_name/raw
  202. ############################################################
  203. def get_stats_file(self, test_name, test_type):
  204. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  205. ############################################################
  206. # End get_stats_file
  207. ############################################################
  208. ############################################################
  209. # stats_file(test_name, test_type)
  210. # returns the stats file for this test_name and test_type
  211. # timestamp/test_type/test_name/raw
  212. ############################################################
  213. def stats_file(self, test_name, test_type):
  214. path = self.get_stats_file(test_name, test_type)
  215. try:
  216. os.makedirs(os.path.dirname(path))
  217. except OSError:
  218. pass
  219. return path
  220. ############################################################
  221. # End stats_file
  222. ############################################################
  223. ############################################################
  224. # full_results_directory
  225. ############################################################
  226. def full_results_directory(self):
  227. path = os.path.join(self.result_directory, self.timestamp)
  228. try:
  229. os.makedirs(path)
  230. except OSError:
  231. pass
  232. return path
  233. ############################################################
  234. # End full_results_directory
  235. ############################################################
  236. ############################################################
  237. # Latest intermediate results dirctory
  238. ############################################################
  239. def latest_results_directory(self):
  240. path = os.path.join(self.result_directory,"latest")
  241. try:
  242. os.makedirs(path)
  243. except OSError:
  244. pass
  245. return path
  246. ############################################################
  247. # report_results
  248. ############################################################
  249. def report_results(self, framework, test, results):
  250. if test not in self.results['rawData'].keys():
  251. self.results['rawData'][test] = dict()
  252. # If results has a size from the parse, then it succeeded.
  253. if results:
  254. self.results['rawData'][test][framework.name] = results
  255. # This may already be set for single-tests
  256. if framework.name not in self.results['succeeded'][test]:
  257. self.results['succeeded'][test].append(framework.name)
  258. # Add this type
  259. if (os.path.exists(self.get_warning_file(framework.name, test)) and
  260. framework.name not in self.results['warning'][test]):
  261. self.results['warning'][test].append(framework.name)
  262. else:
  263. # This may already be set for single-tests
  264. if framework.name not in self.results['failed'][test]:
  265. self.results['failed'][test].append(framework.name)
  266. ############################################################
  267. # End report_results
  268. ############################################################
  269. ##########################################################################################
  270. # Private methods
  271. ##########################################################################################
  272. ############################################################
  273. # Gathers all the tests
  274. ############################################################
    @property
    def __gather_tests(self):
        """Gather every test matching self.test/self.exclude via the
        gather_tests helper, resuming mid-run if a previous invocation was
        interrupted.

        Returns a list of framework test objects.
        """
        tests = gather_tests(include=self.test,
                             exclude=self.exclude,
                             benchmarker=self)
        # If the tests have been interrupted somehow, then we want to resume them where we left
        # off, rather than starting from the beginning
        if os.path.isfile('current_benchmark.txt'):
            # current_benchmark.txt holds the name of the test that was in
            # progress when the run was interrupted (written by __run_tests).
            with open('current_benchmark.txt', 'r') as interrupted_benchmark:
                interrupt_bench = interrupted_benchmark.read()
                for index, atest in enumerate(tests):
                    if atest.name == interrupt_bench:
                        # Drop everything before the interrupted test.
                        tests = tests[index:]
                        break
        return tests
  290. ############################################################
  291. # End __gather_tests
  292. ############################################################
  293. ############################################################
  294. # Gathers all the frameworks
  295. ############################################################
  296. def __gather_frameworks(self):
  297. frameworks = []
  298. # Loop through each directory (we assume we're being run from the benchmarking root)
  299. for dirname, dirnames, filenames in os.walk('.'):
  300. # Look for the benchmark_config file, this will contain our framework name
  301. # It's format looks like this:
  302. #
  303. # {
  304. # "framework": "nodejs",
  305. # "tests": [{
  306. # "default": {
  307. # "setup_file": "setup",
  308. # "json_url": "/json"
  309. # },
  310. # "mysql": {
  311. # "setup_file": "setup",
  312. # "db_url": "/mysql",
  313. # "query_url": "/mysql?queries="
  314. # },
  315. # ...
  316. # }]
  317. # }
  318. if 'benchmark_config' in filenames:
  319. config = None
  320. with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
  321. # Load json file into config object
  322. config = json.load(config_file)
  323. if config == None:
  324. continue
  325. frameworks.append(str(config['framework']))
  326. return frameworks
  327. ############################################################
  328. # End __gather_frameworks
  329. ############################################################
  330. ############################################################
  331. # Makes any necessary changes to the server that should be
  332. # made before running the tests. This involves setting kernal
  333. # settings to allow for more connections, or more file
  334. # descriptiors
  335. #
  336. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  337. ############################################################
  338. def __setup_server(self):
  339. try:
  340. if os.name == 'nt':
  341. return True
  342. subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  343. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  344. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  345. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  346. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  347. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  348. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  349. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  350. except subprocess.CalledProcessError:
  351. return False
  352. ############################################################
  353. # End __setup_server
  354. ############################################################
  355. ############################################################
  356. # Makes any necessary changes to the database machine that
  357. # should be made before running the tests. Is very similar
  358. # to the server setup, but may also include database specific
  359. # changes.
  360. ############################################################
  361. def __setup_database(self):
  362. p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
  363. p.communicate("""
  364. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  365. sudo sysctl -w net.core.somaxconn=65535
  366. sudo -s ulimit -n 65535
  367. sudo sysctl net.ipv4.tcp_tw_reuse=1
  368. sudo sysctl net.ipv4.tcp_tw_recycle=1
  369. sudo sysctl -w kernel.shmmax=2147483648
  370. sudo sysctl -w kernel.shmall=2097152
  371. """)
  372. ############################################################
  373. # End __setup_database
  374. ############################################################
  375. ############################################################
  376. # Makes any necessary changes to the client machine that
  377. # should be made before running the tests. Is very similar
  378. # to the server setup, but may also include client specific
  379. # changes.
  380. ############################################################
  381. def __setup_client(self):
  382. p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
  383. p.communicate("""
  384. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  385. sudo sysctl -w net.core.somaxconn=65535
  386. sudo -s ulimit -n 65535
  387. sudo sysctl net.ipv4.tcp_tw_reuse=1
  388. sudo sysctl net.ipv4.tcp_tw_recycle=1
  389. sudo sysctl -w kernel.shmmax=2147483648
  390. sudo sysctl -w kernel.shmall=2097152
  391. """)
  392. ############################################################
  393. # End __setup_client
  394. ############################################################
  395. ############################################################
  396. # __run_tests
  397. #
  398. # 2013-10-02 ASB Calls each test passed in tests to
  399. # __run_test in a separate process. Each
  400. # test is given a set amount of time and if
  401. # kills the child process (and subsequently
  402. # all of its child processes). Uses
  403. # multiprocessing module.
  404. ############################################################
    def __run_tests(self, tests):
        """Run each test in *tests*, one at a time.

        On Windows the tests run in-process; elsewhere each test runs in its
        own multiprocessing.Process so a hung test can be killed after
        self.run_test_timeout_seconds. The name of the test currently running
        is persisted to current_benchmark.txt so an interrupted run can resume
        (see __gather_tests).

        Returns 1 if any test failed, else 0.
        """
        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s",__name__)
        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                if self.__run_test(test) != 0:
                    error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")
            # These features do not work on Windows
            for test in tests:
                # NOTE(review): guard looks intended to skip work when this
                # module is imported under a different package path -- confirm.
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                    with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                        benchmark_resume_file.write(test.name)
                    # Run the test in a child process so it can be killed on timeout.
                    test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                    test_process.start()
                    test_process.join(self.run_test_timeout_seconds)
                    self.__load_results() # Load intermediate result from child process
                    if(test_process.is_alive()):
                        # join() returned by timeout, not because the child exited.
                        logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                        self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()
                        test_process.join()
                    if test_process.exitcode != 0:
                        error_happened = True
        # The run completed (or failed) normally; remove the resume marker.
        if os.path.isfile('current_benchmark.txt'):
            os.remove('current_benchmark.txt')
        logging.debug("End __run_tests.")
        if error_happened:
            return 1
        return 0
  441. ############################################################
  442. # End __run_tests
  443. ############################################################
  444. ############################################################
  445. # __run_test
  446. # 2013-10-02 ASB Previously __run_tests. This code now only
  447. # processes a single test.
  448. #
  449. # Ensures that the system has all necessary software to run
  450. # the tests. This does not include that software for the individual
  451. # test, but covers software such as curl and weighttp that
  452. # are needed.
  453. ############################################################
    def __run_test(self, test):
        """Run a single framework test end to end: skip checks, database
        restart, server start, URL verification, optional benchmarking, and
        shutdown. All output is captured to per-test out.txt/err.txt logs.

        Runs inside a child process on non-Windows platforms (see
        __run_tests), so failures are reported via the process exit code.
        """
        # Used to capture return values: on Windows we run in-process and
        # return the code; elsewhere we are a child process and must exit
        # with it so the parent sees it in Process.exitcode.
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)

        # NOTE(review): self.latest_results_directory is used here without
        # calling it -- presumably __init__ stores the computed path under
        # the same attribute name; confirm against the constructor.
        try:
            os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
        except:
            pass
        with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
                open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
            # Honor an explicit skip flag in benchmark_config.
            if hasattr(test, 'skip'):
                if test.skip.lower() == "true":
                    out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
                    return exit_with_code(0)
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                # the operating system requirements of this test for the
                # application server or the database server don't match
                # our current environment
                out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
                return exit_with_code(0)
            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)
            # If the test does not contain an implementation of the current test-type, skip it
            if self.type != 'all' and not test.contains_type(self.type):
                out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
                return exit_with_code(0)
            # Diagnostic breadcrumbs for the skip decision below.
            out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            # Skip tests that already have saved results from a previous run.
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                return exit_with_code(1)
            out.flush()
            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()
            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                # Restart every database service so each test starts from a
                # clean slate, then give them time to come back up.
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                    p.communicate("""
sudo restart mysql
sudo restart mongodb
sudo service redis-server restart
sudo /etc/init.d/postgresql restart
""")
                    time.sleep(10)
                # A previous test failing to release the port is fatal here.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)
                result = test.start(out, err)
                if result != 0:
                    test.stop(out, err)
                    time.sleep(5)
                    err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                    err.write(header("Stopped %s" % test.name))
                    err.flush()
                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                    return exit_with_code(1)
                # Give the server time to settle before hitting it.
                time.sleep(self.sleep)
                ##########################
                # Verify URLs
                ##########################
                passed_verify = test.verify_urls(out, err)
                out.flush()
                err.flush()
                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(out, err)
                    out.flush()
                    err.flush()
                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                test.stop(out, err)
                out.flush()
                err.flush()
                time.sleep(5)
                # The framework must release its port on stop; a bound port
                # here would poison the next test's startup.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                    err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)
                out.write(header("Stopped %s" % test.name))
                out.flush()
                time.sleep(5)
                ##########################################################
                # Save results thus far into toolset/benchmark/latest.json
                ##########################################################
                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                # The setup.py of the test raised; record it and try to shut
                # the framework down cleanly anyway.
                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                err.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=err)
                err.flush()
                try:
                    test.stop(out, err)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                    err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=err)
                    err.flush()
                out.close()
                err.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                test.stop(out, err)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)
            out.close()
            err.close()
            return exit_with_code(0)
  592. ############################################################
  593. # End __run_tests
  594. ############################################################
  595. ############################################################
  596. # __is_port_bound
  597. # Check if the requested port is available. If it
  598. # isn't available, then a previous test probably didn't
  599. # shutdown properly.
  600. ############################################################
  601. def __is_port_bound(self, port):
  602. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  603. try:
  604. # Try to bind to all IP addresses, this port
  605. s.bind(("", port))
  606. # If we get here, we were able to bind successfully,
  607. # which means the port is free.
  608. except:
  609. # If we get an exception, it might be because the port is still bound
  610. # which would be bad, or maybe it is a privileged port (<1024) and we
  611. # are not running as root, or maybe the server is gone, but sockets are
  612. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  613. # connect.
  614. try:
  615. s.connect(("127.0.0.1", port))
  616. # If we get here, we were able to connect to something, which means
  617. # that the port is still bound.
  618. return True
  619. except:
  620. # An exception means that we couldn't connect, so a server probably
  621. # isn't still running on the port.
  622. pass
  623. finally:
  624. s.close()
  625. return False
  626. ############################################################
  627. # End __is_port_bound
  628. ############################################################
  629. ############################################################
  630. # __parse_results
  631. # Ensures that the system has all necessary software to run
  632. # the tests. This does not include that software for the individual
  633. # test, but covers software such as curl and weighttp that
  634. # are needed.
  635. ############################################################
  636. def __parse_results(self, tests):
  637. # Run the method to get the commmit count of each framework.
  638. self.__count_commits()
  639. # Call the method which counts the sloc for each framework
  640. self.__count_sloc()
  641. # Time to create parsed files
  642. # Aggregate JSON file
  643. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  644. f.write(json.dumps(self.results))
  645. ############################################################
  646. # End __parse_results
  647. ############################################################
  648. #############################################################
  649. # __count_sloc
  650. # This is assumed to be run from the benchmark root directory
  651. #############################################################
  652. def __count_sloc(self):
  653. all_frameworks = self.__gather_frameworks()
  654. jsonResult = {}
  655. for framework in all_frameworks:
  656. try:
  657. command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
  658. lineCount = subprocess.check_output(command, shell=True)
  659. # Find the last instance of the word 'code' in the yaml output. This should
  660. # be the line count for the sum of all listed files or just the line count
  661. # for the last file in the case where there's only one file listed.
  662. lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
  663. lineCount = lineCount.strip('code: ')
  664. lineCount = lineCount[0:lineCount.rfind('comment')]
  665. jsonResult[framework['name']] = int(lineCount)
  666. except:
  667. continue
  668. self.results['rawData']['slocCounts'] = jsonResult
  669. ############################################################
  670. # End __count_sloc
  671. ############################################################
  672. ############################################################
  673. # __count_commits
  674. ############################################################
  675. def __count_commits(self):
  676. all_frameworks = self.__gather_frameworks()
  677. jsonResult = {}
  678. for framework in all_frameworks:
  679. try:
  680. command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
  681. commitCount = subprocess.check_output(command, shell=True)
  682. jsonResult[framework] = int(commitCount)
  683. except:
  684. continue
  685. self.results['rawData']['commitCounts'] = jsonResult
  686. self.commits = jsonResult
  687. ############################################################
  688. # End __count_commits
  689. ############################################################
  690. ############################################################
  691. # __write_intermediate_results
  692. ############################################################
  693. def __write_intermediate_results(self,test_name,status_message):
  694. try:
  695. self.results["completed"][test_name] = status_message
  696. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  697. f.write(json.dumps(self.results))
  698. except (IOError):
  699. logging.error("Error writing results.json")
  700. ############################################################
  701. # End __write_intermediate_results
  702. ############################################################
  703. def __load_results(self):
  704. try:
  705. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  706. self.results = json.load(f)
  707. except (ValueError, IOError):
  708. pass
  709. ############################################################
  710. # __finish
  711. ############################################################
  712. def __finish(self):
  713. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  714. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  715. ############################################################
  716. # End __finish
  717. ############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
    # Inject every parsed argument as an instance attribute
    # (e.g. args['name'] becomes self.name). All self.* reads
    # below depend on this.
    self.__dict__.update(args)
    self.start_time = time.time()
    # Hard cap on how long a single test may run (1 hour).
    self.run_test_timeout_seconds = 3600
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    # setup some additional variables: database credentials fall back
    # to the client machine's credentials when not given explicitly
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
    # Remember root directory
    self.fwroot = setup_util.get_fwroot()
    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    # NOTE(review): this intentionally REPLACES the bound method
    # latest_results_directory() with its return value; any later
    # self.latest_results_directory reference is the string path.
    self.latest_results_directory = self.latest_results_directory()
    # When --parse is given, reuse that run's timestamp instead of
    # starting a new timestamped results directory.
    if self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
        self.concurrency_levels.append(concurrency)
        concurrency = concurrency * 2
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
    # (the queries==1 special case makes the sequence 1, query_interval,
    # 2*query_interval, ... rather than 1, 1+interval, ...)
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
        self.query_intervals.append(queries)
        if queries == 1:
            queries = 0
        queries = queries + self.query_interval
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass
    # Resume from a previous run's results.json if one exists;
    # otherwise build a fresh results skeleton below.
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            #Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test %s not found.",self.name)
    if self.results == None:
        # No previous results: build the empty results skeleton with
        # one bucket per test type (json/db/query/fortune/update/plaintext)
        # in each of rawData, succeeded, failed and warning.
        self.results = dict()
        self.results['name'] = self.name
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_intervals
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['warning'] = dict()
        self.results['warning']['json'] = []
        self.results['warning']['db'] = []
        self.results['warning']['query'] = []
        self.results['warning']['fortune'] = []
        self.results['warning']['update'] = []
        self.results['warning']['plaintext'] = []
    else:
        #for x in self.__gather_tests():
        #  if x.name not in self.results['frameworks']:
        #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
    # Setup the ssh command string used to reach the database and
    # client machines; -T disables pty allocation, and host key
    # checking is skipped for unattended runs.
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
    # Optionally install required software before the run, using the
    # strategy chosen on the command line.
    if self.install is not None:
        install = Installer(self, self.install_strategy)
        install.install_software()
############################################################
# End __init__
############################################################