# benchmarker.py
import csv
import errno
import json
import logging
import os
import pprint
import socket
import subprocess
import sys
import time
import traceback
from datetime import datetime
from multiprocessing import Process

from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from utils import header
from utils import gather_tests
  18. class Benchmarker:
  19. ##########################################################################################
  20. # Public methods
  21. ##########################################################################################
  22. ############################################################
  23. # Prints all the available tests
  24. ############################################################
  def run_list_tests(self):
    """Print the name of every runnable test, one per line, then clean up."""
    for available_test in self.__gather_tests:
      print(available_test.name)
    self.__finish()
  30. ############################################################
  31. # End run_list_tests
  32. ############################################################
  33. ############################################################
  34. # Prints the metadata for all the available tests
  35. ############################################################
  def run_list_test_metadata(self):
    """Dump the metadata for every available test as a JSON array into
    <results>/test_metadata.json, then clean up."""
    metadata = [{
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    } for test in self.__gather_tests]
    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(json.dumps(metadata))
    self.__finish()
  57. ############################################################
  58. # End run_list_test_metadata
  59. ############################################################
  60. ############################################################
  61. # parse_timestamp
  62. # Re-parses the raw data for a given timestamp
  63. ############################################################
  def parse_timestamp(self):
    """Re-parse the raw data already collected for the current timestamp."""
    tests = self.__gather_tests
    for single_test in tests:
      single_test.parse_all()
    self.__parse_results(tests)
    self.__finish()
  70. ############################################################
  71. # End parse_timestamp
  72. ############################################################
  73. ############################################################
  74. # Run the tests:
  75. # This process involves setting up the client/server machines
  76. # with any necessary change. Then going through each test,
  77. # running their setup script, verifying the URLs, and
  78. # running benchmarks against them.
  79. ############################################################
  def run(self):
    """Run the full benchmark cycle.

    Prepares the server, database, and client machines, runs every gathered
    test (setup script, URL verification, benchmarks), and — in benchmark
    mode — parses the results.  Returns the exit status from __run_tests.
    """
    tests = self.__gather_tests

    # Prepare all three machines before any test starts.
    print(header("Preparing Server, Database, and Client ...", top='=', bottom='='))
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    print(header("Running Tests...", top='=', bottom='='))
    result = self.__run_tests(tests)

    # Only full benchmark runs produce data worth parsing.
    if self.mode == "benchmark":
      print(header("Parsing Results ...", top='=', bottom='='))
      self.__parse_results(tests)

    self.__finish()
    return result
  109. ############################################################
  110. # End run
  111. ############################################################
  112. ############################################################
  113. # database_sftp_string(batch_file)
  114. # generates a fully qualified URL for sftp to database
  115. ############################################################
  116. def database_sftp_string(self, batch_file):
  117. sftp_string = "sftp -oStrictHostKeyChecking=no "
  118. if batch_file != None: sftp_string += " -b " + batch_file + " "
  119. if self.database_identity_file != None:
  120. sftp_string += " -i " + self.database_identity_file + " "
  121. return sftp_string + self.database_user + "@" + self.database_host
  122. ############################################################
  123. # End database_sftp_string
  124. ############################################################
  125. ############################################################
  126. # client_sftp_string(batch_file)
  127. # generates a fully qualified URL for sftp to client
  128. ############################################################
  129. def client_sftp_string(self, batch_file):
  130. sftp_string = "sftp -oStrictHostKeyChecking=no "
  131. if batch_file != None: sftp_string += " -b " + batch_file + " "
  132. if self.client_identity_file != None:
  133. sftp_string += " -i " + self.client_identity_file + " "
  134. return sftp_string + self.client_user + "@" + self.client_host
  135. ############################################################
  136. # End client_sftp_string
  137. ############################################################
  138. ############################################################
  139. # generate_url(url, port)
  140. # generates a fully qualified URL for accessing a test url
  141. ############################################################
  142. def generate_url(self, url, port):
  143. return self.server_host + ":" + str(port) + url
  144. ############################################################
  145. # End generate_url
  146. ############################################################
  147. ############################################################
  148. # get_output_file(test_name, test_type)
  149. # returns the output file name for this test_name and
  150. # test_type timestamp/test_type/test_name/raw
  151. ############################################################
  152. def get_output_file(self, test_name, test_type):
  153. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  154. ############################################################
  155. # End get_output_file
  156. ############################################################
  157. ############################################################
  158. # output_file(test_name, test_type)
  159. # returns the output file for this test_name and test_type
  160. # timestamp/test_type/test_name/raw
  161. ############################################################
  162. def output_file(self, test_name, test_type):
  163. path = self.get_output_file(test_name, test_type)
  164. try:
  165. os.makedirs(os.path.dirname(path))
  166. except OSError:
  167. pass
  168. return path
  169. ############################################################
  170. # End output_file
  171. ############################################################
  172. ############################################################
  173. # get_warning_file(test_name, test_type)
  174. # returns the output file name for this test_name and
  175. # test_type timestamp/test_type/test_name/raw
  176. ############################################################
  177. def get_warning_file(self, test_name, test_type):
  178. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
  179. ############################################################
  180. # End get_warning_file
  181. ############################################################
  182. ############################################################
  183. # warning_file(test_name, test_type)
  184. # returns the warning file for this test_name and test_type
  185. # timestamp/test_type/test_name/raw
  186. ############################################################
  187. def warning_file(self, test_name, test_type):
  188. path = self.get_warning_file(test_name, test_type)
  189. try:
  190. os.makedirs(os.path.dirname(path))
  191. except OSError:
  192. pass
  193. return path
  194. ############################################################
  195. # End warning_file
  196. ############################################################
  197. ############################################################
  198. # get_stats_file(test_name, test_type)
  199. # returns the stats file name for this test_name and
  200. # test_type timestamp/test_type/test_name/raw
  201. ############################################################
  202. def get_stats_file(self, test_name, test_type):
  203. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  204. ############################################################
  205. # End get_stats_file
  206. ############################################################
  207. ############################################################
  208. # stats_file(test_name, test_type)
  209. # returns the stats file for this test_name and test_type
  210. # timestamp/test_type/test_name/raw
  211. ############################################################
  212. def stats_file(self, test_name, test_type):
  213. path = self.get_stats_file(test_name, test_type)
  214. try:
  215. os.makedirs(os.path.dirname(path))
  216. except OSError:
  217. pass
  218. return path
  219. ############################################################
  220. # End stats_file
  221. ############################################################
  222. ############################################################
  223. # full_results_directory
  224. ############################################################
  225. def full_results_directory(self):
  226. path = os.path.join(self.result_directory, self.timestamp)
  227. try:
  228. os.makedirs(path)
  229. except OSError:
  230. pass
  231. return path
  232. ############################################################
  233. # End full_results_directory
  234. ############################################################
  235. ############################################################
  236. # Latest intermediate results dirctory
  237. ############################################################
  238. def latest_results_directory(self):
  239. path = os.path.join(self.result_directory,"latest")
  240. try:
  241. os.makedirs(path)
  242. except OSError:
  243. pass
  244. return path
  245. ############################################################
  246. # report_results
  247. ############################################################
  248. def report_results(self, framework, test, results):
  249. if test not in self.results['rawData'].keys():
  250. self.results['rawData'][test] = dict()
  251. # If results has a size from the parse, then it succeeded.
  252. if results:
  253. self.results['rawData'][test][framework.name] = results
  254. # This may already be set for single-tests
  255. if framework.name not in self.results['succeeded'][test]:
  256. self.results['succeeded'][test].append(framework.name)
  257. # Add this type
  258. if (os.path.exists(self.get_warning_file(framework.name, test)) and
  259. framework.name not in self.results['warning'][test]):
  260. self.results['warning'][test].append(framework.name)
  261. else:
  262. # This may already be set for single-tests
  263. if framework.name not in self.results['failed'][test]:
  264. self.results['failed'][test].append(framework.name)
  265. ############################################################
  266. # End report_results
  267. ############################################################
  268. ##########################################################################################
  269. # Private methods
  270. ##########################################################################################
  271. ############################################################
  272. # Gathers all the tests
  273. ############################################################
  @property
  def __gather_tests(self):
    """All framework tests matching the include/exclude filters.

    If a previous run was interrupted (current_benchmark.txt exists),
    resume from the interrupted test rather than starting over.
    """
    tests = gather_tests(include=self.test,
                         exclude=self.exclude,
                         benchmarker=self)
    if os.path.isfile('current_benchmark.txt'):
      with open('current_benchmark.txt', 'r') as interrupted_benchmark:
        resume_name = interrupted_benchmark.read()
      # Drop every test that already completed before the interruption.
      for position, candidate in enumerate(tests):
        if candidate.name == resume_name:
          tests = tests[position:]
          break
    return tests
  289. ############################################################
  290. # End __gather_tests
  291. ############################################################
  292. ############################################################
  293. # Gathers all the frameworks
  294. ############################################################
  295. def __gather_frameworks(self):
  296. frameworks = []
  297. # Loop through each directory (we assume we're being run from the benchmarking root)
  298. for dirname, dirnames, filenames in os.walk('.'):
  299. # Look for the benchmark_config file, this will contain our framework name
  300. # It's format looks like this:
  301. #
  302. # {
  303. # "framework": "nodejs",
  304. # "tests": [{
  305. # "default": {
  306. # "setup_file": "setup",
  307. # "json_url": "/json"
  308. # },
  309. # "mysql": {
  310. # "setup_file": "setup",
  311. # "db_url": "/mysql",
  312. # "query_url": "/mysql?queries="
  313. # },
  314. # ...
  315. # }]
  316. # }
  317. if 'benchmark_config' in filenames:
  318. config = None
  319. with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
  320. # Load json file into config object
  321. config = json.load(config_file)
  322. if config == None:
  323. continue
  324. frameworks.append(str(config['framework']))
  325. return frameworks
  326. ############################################################
  327. # End __gather_frameworks
  328. ############################################################
  329. ############################################################
  330. # Makes any necessary changes to the server that should be
  331. # made before running the tests. This involves setting kernal
  332. # settings to allow for more connections, or more file
  333. # descriptiors
  334. #
  335. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  336. ############################################################
  def __setup_server(self):
    """Tune the local (server) machine's kernel before running tests.

    Sets the performance CPU governor, enlarges TCP backlogs, raises the
    file-descriptor limit, enables TIME_WAIT reuse/recycle, and sets
    shared-memory limits.  Returns True (no-op) on Windows, False if any
    command fails, None on success.

    http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
    """
    if os.name == 'nt':
      return True
    try:
      subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
      for sysctl_cmd in (
          "sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535",
          "sudo sysctl -w net.core.somaxconn=65535",
          "sudo -s ulimit -n 65535",
          "sudo sysctl net.ipv4.tcp_tw_reuse=1",
          "sudo sysctl net.ipv4.tcp_tw_recycle=1",
          "sudo sysctl -w kernel.shmmax=134217728",
          "sudo sysctl -w kernel.shmall=2097152"):
        subprocess.check_call(sysctl_cmd.rsplit(" "))
    except subprocess.CalledProcessError:
      return False
  351. ############################################################
  352. # End __setup_server
  353. ############################################################
  354. ############################################################
  355. # Makes any necessary changes to the database machine that
  356. # should be made before running the tests. Is very similar
  357. # to the server setup, but may also include database specific
  358. # changes.
  359. ############################################################
  def __setup_database(self):
    # Tune the database machine's kernel over ssh before the tests run:
    # larger TCP backlogs, higher fd limit, TIME_WAIT reuse/recycle, and
    # shared-memory limits (shmmax here is 2 GB, larger than the app
    # server's, for the databases).  The command block is fed to the
    # remote shell on stdin; communicate() blocks until it finishes.
    # NOTE(review): no exit-status check — failures here are silent.
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  371. ############################################################
  372. # End __setup_database
  373. ############################################################
  374. ############################################################
  375. # Makes any necessary changes to the client machine that
  376. # should be made before running the tests. Is very similar
  377. # to the server setup, but may also include client specific
  378. # changes.
  379. ############################################################
  def __setup_client(self):
    # Tune the client (load-generator) machine's kernel over ssh before
    # the tests run; same settings as the database machine.  The command
    # block is fed to the remote shell on stdin; communicate() blocks
    # until it finishes.
    # NOTE(review): no exit-status check — failures here are silent.
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  391. ############################################################
  392. # End __setup_client
  393. ############################################################
  394. ############################################################
  395. # __run_tests
  396. #
  397. # 2013-10-02 ASB Calls each test passed in tests to
  398. # __run_test in a separate process. Each
  399. # test is given a set amount of time and if
  400. # kills the child process (and subsequently
  401. # all of its child processes). Uses
  402. # multiprocessing module.
  403. ############################################################
  def __run_tests(self, tests):
    """Run each test in `tests`, one at a time.

    2013-10-02 ASB  On Linux each test runs in its own
    multiprocessing.Process so a hung test can be killed after
    run_test_timeout_seconds; on Windows the test runs in-process.
    current_benchmark.txt records the test in progress so an interrupted
    run can resume (see __gather_tests).  Returns 1 if any test failed,
    0 otherwise.
    """
    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s",__name__)
    error_happened = False
    if self.os.lower() == 'windows':
      logging.debug("Executing __run_tests on Windows")
      for test in tests:
        # Note which test is running so a crashed run can resume here.
        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        if self.__run_test(test) != 0:
          error_happened = True
    else:
      logging.debug("Executing __run_tests on Linux")
      # These features do not work on Windows
      for test in tests:
        if __name__ == 'benchmark.benchmarker':
          print header("Running Test: %s" % test.name)
        # Note which test is running so a crashed run can resume here.
        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        # Run the test in a child process so it can be killed on timeout.
        test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
        test_process.start()
        test_process.join(self.run_test_timeout_seconds)
        self.__load_results()  # Load intermediate result from child process
        if(test_process.is_alive()):
          # join() timed out: the test hung.  Record the timeout and kill
          # the child process (and its children).
          logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
          self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
          test_process.terminate()
          test_process.join()
        if test_process.exitcode != 0:
          error_happened = True
    # All tests finished; the resume marker is no longer needed.
    if os.path.isfile('current_benchmark.txt'):
      os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")
    if error_happened:
      return 1
    return 0
  440. ############################################################
  441. # End __run_tests
  442. ############################################################
  443. ############################################################
  444. # __run_test
  445. # 2013-10-02 ASB Previously __run_tests. This code now only
  446. # processes a single test.
  447. #
  448. # Ensures that the system has all necessary software to run
  449. # the tests. This does not include that software for the individual
  450. # test, but covers software such as curl and weighttp that
  451. # are needed.
  452. ############################################################
  def __run_test(self, test):
    """Run a single test end-to-end.

    2013-10-02 ASB  Previously __run_tests; now handles one test only.
    Restarts the databases, starts the framework, verifies its URLs,
    optionally benchmarks it, stops it, and saves intermediate results.
    On Linux this runs as a child process (see __run_tests), so outcomes
    are reported via sys.exit; on Windows the code is returned directly.
    All output goes to per-test out.txt/err.txt log files.
    """
    # Used to capture return values
    def exit_with_code(code):
      # In-process on Windows -> return; child process on Linux -> the
      # code becomes our exit status for the parent to inspect.
      if self.os.lower() == 'windows':
        return code
      else:
        sys.exit(code)

    # Ensure the per-test log directory exists (ignore "already exists").
    try:
      os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
    except:
      pass
    with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
         open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
      # Honor an explicit opt-out in benchmark_config.
      if hasattr(test, 'skip'):
        if test.skip.lower() == "true":
          out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
          return exit_with_code(0)
      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
        # the operating system requirements of this test for the
        # application server or the database server don't match
        # our current environment
        out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
        return exit_with_code(0)
      # If the test is in the excludes list, we skip it
      if self.exclude != None and test.name in self.exclude:
        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
        return exit_with_code(0)
      # If the test does not contain an implementation of the current test-type, skip it
      if self.type != 'all' and not test.contains_type(self.type):
        out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
        return exit_with_code(0)
      # Debug breadcrumbs for diagnosing skip/resume decisions.
      out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
      out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
      out.write("test.name: {name}\n".format(name=str(test.name)))
      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
      # Skip tests already completed in a previous (resumed) run.
      if self.results['frameworks'] != None and test.name in self.results['completed']:
        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
        return exit_with_code(1)
      out.flush()
      out.write(header("Beginning %s" % test.name, top='='))
      out.flush()
      ##########################
      # Start this test
      ##########################
      out.write(header("Starting %s" % test.name))
      out.flush()
      try:
        # Restart every database so each test starts from a clean slate.
        if test.requires_database():
          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
          p.communicate("""
            sudo restart mysql
            sudo restart mongodb
            sudo service redis-server restart
            sudo /etc/init.d/postgresql restart
          """)
          time.sleep(10)
        # A bound port means the previous test did not shut down cleanly.
        if self.__is_port_bound(test.port):
          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
          err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
          err.flush()
          return exit_with_code(1)
        result = test.start(out, err)
        if result != 0:
          # Framework failed to start; stop it and record the failure.
          test.stop(out, err)
          time.sleep(5)
          err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
          err.write(header("Stopped %s" % test.name))
          err.flush()
          self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
          return exit_with_code(1)
        # Give the framework time to finish booting before hitting it.
        time.sleep(self.sleep)
        ##########################
        # Verify URLs
        ##########################
        passed_verify = test.verify_urls(out, err)
        out.flush()
        err.flush()
        ##########################
        # Benchmark this test
        ##########################
        if self.mode == "benchmark":
          out.write(header("Benchmarking %s" % test.name))
          out.flush()
          test.benchmark(out, err)
          out.flush()
          err.flush()
        ##########################
        # Stop this test
        ##########################
        out.write(header("Stopping %s" % test.name))
        out.flush()
        test.stop(out, err)
        out.flush()
        err.flush()
        time.sleep(5)
        # The test must release its port, or later tests cannot bind it.
        if self.__is_port_bound(test.port):
          self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
          err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
          err.flush()
          return exit_with_code(1)
        out.write(header("Stopped %s" % test.name))
        out.flush()
        time.sleep(5)
        ##########################################################
        # Save results thus far into toolset/benchmark/latest.json
        ##########################################################
        out.write(header("Saving results through %s" % test.name))
        out.flush()
        self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
        if self.mode == "verify" and not passed_verify:
          print "Failed verify!"
          return exit_with_code(1)
      except (OSError, IOError, subprocess.CalledProcessError) as e:
        self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
        err.write(header("Subprocess Error %s" % test.name))
        traceback.print_exc(file=err)
        err.flush()
        # Best effort: make sure the framework is not left running.
        try:
          test.stop(out, err)
        except (subprocess.CalledProcessError) as e:
          self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
          err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
          traceback.print_exc(file=err)
          err.flush()
        out.close()
        err.close()
        return exit_with_code(1)
      # TODO - subprocess should not catch this exception!
      # Parent process should catch it and cleanup/exit
      except (KeyboardInterrupt) as e:
        test.stop(out, err)
        out.write(header("Cleaning up..."))
        out.flush()
        self.__finish()
        sys.exit(1)
      out.close()
      err.close()
      return exit_with_code(0)
  591. ############################################################
  592. # End __run_tests
  593. ############################################################
  594. ############################################################
  595. # __is_port_bound
  596. # Check if the requested port is available. If it
  597. # isn't available, then a previous test probably didn't
  598. # shutdown properly.
  599. ############################################################
  600. def __is_port_bound(self, port):
  601. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  602. try:
  603. # Try to bind to all IP addresses, this port
  604. s.bind(("", port))
  605. # If we get here, we were able to bind successfully,
  606. # which means the port is free.
  607. except:
  608. # If we get an exception, it might be because the port is still bound
  609. # which would be bad, or maybe it is a privileged port (<1024) and we
  610. # are not running as root, or maybe the server is gone, but sockets are
  611. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  612. # connect.
  613. try:
  614. s.connect(("127.0.0.1", port))
  615. # If we get here, we were able to connect to something, which means
  616. # that the port is still bound.
  617. return True
  618. except:
  619. # An exception means that we couldn't connect, so a server probably
  620. # isn't still running on the port.
  621. pass
  622. finally:
  623. s.close()
  624. return False
  625. ############################################################
  626. # End __is_port_bound
  627. ############################################################
  628. ############################################################
  629. # __parse_results
# Aggregates the collected benchmark data for this run and writes the
# combined results.json file. Also gathers per-framework commit counts
# and source-line-of-code counts into the results first.
  634. ############################################################
  635. def __parse_results(self, tests):
  636. # Run the method to get the commmit count of each framework.
  637. self.__count_commits()
  638. # Call the method which counts the sloc for each framework
  639. self.__count_sloc()
  640. # Time to create parsed files
  641. # Aggregate JSON file
  642. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  643. f.write(json.dumps(self.results))
  644. ############################################################
  645. # End __parse_results
  646. ############################################################
  647. #############################################################
  648. # __count_sloc
  649. # This is assumed to be run from the benchmark root directory
  650. #############################################################
  651. def __count_sloc(self):
  652. all_frameworks = self.__gather_frameworks()
  653. jsonResult = {}
  654. for framework in all_frameworks:
  655. try:
  656. command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
  657. lineCount = subprocess.check_output(command, shell=True)
  658. # Find the last instance of the word 'code' in the yaml output. This should
  659. # be the line count for the sum of all listed files or just the line count
  660. # for the last file in the case where there's only one file listed.
  661. lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
  662. lineCount = lineCount.strip('code: ')
  663. lineCount = lineCount[0:lineCount.rfind('comment')]
  664. jsonResult[framework['name']] = int(lineCount)
  665. except:
  666. continue
  667. self.results['rawData']['slocCounts'] = jsonResult
  668. ############################################################
  669. # End __count_sloc
  670. ############################################################
  671. ############################################################
  672. # __count_commits
  673. ############################################################
  674. def __count_commits(self):
  675. all_frameworks = self.__gather_frameworks()
  676. jsonResult = {}
  677. for framework in all_frameworks:
  678. try:
  679. command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
  680. commitCount = subprocess.check_output(command, shell=True)
  681. jsonResult[framework] = int(commitCount)
  682. except:
  683. continue
  684. self.results['rawData']['commitCounts'] = jsonResult
  685. self.commits = jsonResult
  686. ############################################################
  687. # End __count_commits
  688. ############################################################
  689. ############################################################
  690. # __write_intermediate_results
  691. ############################################################
  692. def __write_intermediate_results(self,test_name,status_message):
  693. try:
  694. self.results["completed"][test_name] = status_message
  695. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  696. f.write(json.dumps(self.results))
  697. except (IOError):
  698. logging.error("Error writing results.json")
  699. ############################################################
  700. # End __write_intermediate_results
  701. ############################################################
  702. def __load_results(self):
  703. try:
  704. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  705. self.results = json.load(f)
  706. except (ValueError, IOError):
  707. pass
  708. ############################################################
  709. # __finish
  710. ############################################################
  711. def __finish(self):
  712. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  713. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  714. ############################################################
  715. # End __finish
  716. ############################################################
  717. ##########################################################################################
  718. # Constructor
  719. ##########################################################################################
  720. ############################################################
  721. # Initialize the benchmarker. The args are the arguments
  722. # parsed via argparser.
  723. ############################################################
def __init__(self, args):
    """
    Initialize the benchmarker from the argparse-produced argument dict.

    Copies every parsed argument onto the instance, derives database
    host/user/identity-file defaults from the client settings, builds the
    concurrency and query-interval arrays, loads any previous results.json
    (or creates a fresh results skeleton), assembles the ssh command
    strings, and optionally runs the software installer.
    """
    # Expose every parsed argument as an instance attribute
    # (e.g. args['name'] becomes self.name).
    self.__dict__.update(args)
    self.start_time = time.time()
    # Hard cap on how long a single test may run (one hour).
    self.run_test_timeout_seconds = 3600
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    # setup some additional variables: database settings fall back to
    # the corresponding client settings when not given explicitly.
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
    # Remember root directory
    self.fwroot = setup_util.get_fwroot()
    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    # NOTE: this call's return value replaces the bound method on the
    # instance; after this line latest_results_directory is a path string.
    self.latest_results_directory = self.latest_results_directory()
    # When parsing an existing run (--parse), reuse its timestamp so the
    # correct results directory is located; otherwise stamp with "now".
    if self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
        self.concurrency_levels.append(concurrency)
        concurrency = concurrency * 2
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval.
    # (queries is reset to 0 before the first increment, so the second
    # entry is query_interval itself, then multiples of it.)
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
        self.query_intervals.append(queries)
        if queries == 1:
            queries = 0
        queries = queries + self.query_interval
    # Load the latest data (legacy latest.json loading, kept for reference)
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            # Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test %s not found.",self.name)
    if self.results == None:
        # No previous results found: build the empty results skeleton.
        self.results = dict()
        self.results['name'] = self.name
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_intervals
        # __gather_tests is accessed without calling -- presumably a
        # property or cached attribute; TODO confirm.
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        # Raw benchmark data buckets, one per test type.
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        # Per-test-type lists of framework names that succeeded, failed,
        # or produced warnings.
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['warning'] = dict()
        self.results['warning']['json'] = []
        self.results['warning']['db'] = []
        self.results['warning']['query'] = []
        self.results['warning']['fortune'] = []
        self.results['warning']['update'] = []
        self.results['warning']['plaintext'] = []
    else:
        #for x in self.__gather_tests():
        #  if x.name not in self.results['frameworks']:
        #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
    # Optionally install the required software on the target machines.
    if self.install is not None:
        install = Installer(self, self.install_strategy)
        install.install_software()
  837. ############################################################
  838. # End __init__
  839. ############################################################