benchmarker.py 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958
  1. from setup.linux.installer import Installer
  2. from setup.linux import setup_util
  3. from benchmark import framework_test
  4. from utils import header
  5. from utils import gather_tests
  6. import os
  7. import json
  8. import subprocess
  9. import traceback
  10. import time
  11. import pprint
  12. import csv
  13. import sys
  14. import logging
  15. import socket
  16. from multiprocessing import Process
  17. from datetime import datetime
  18. # Cross-platform colored text
  19. from colorama import Fore, Back, Style
  20. class Benchmarker:
  21. ##########################################################################################
  22. # Public methods
  23. ##########################################################################################
  24. ############################################################
  25. # Prints all the available tests
  26. ############################################################
  27. def run_list_tests(self):
  28. all_tests = self.__gather_tests
  29. for test in all_tests:
  30. print test.name
  31. self.__finish()
  32. ############################################################
  33. # End run_list_tests
  34. ############################################################
  35. ############################################################
  36. # Prints the metadata for all the available tests
  37. ############################################################
  38. def run_list_test_metadata(self):
  39. all_tests = self.__gather_tests
  40. all_tests_json = json.dumps(map(lambda test: {
  41. "name": test.name,
  42. "approach": test.approach,
  43. "classification": test.classification,
  44. "database": test.database,
  45. "framework": test.framework,
  46. "language": test.language,
  47. "orm": test.orm,
  48. "platform": test.platform,
  49. "webserver": test.webserver,
  50. "os": test.os,
  51. "database_os": test.database_os,
  52. "display_name": test.display_name,
  53. "notes": test.notes,
  54. "versus": test.versus
  55. }, all_tests))
  56. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  57. f.write(all_tests_json)
  58. self.__finish()
  59. ############################################################
  60. # End run_list_test_metadata
  61. ############################################################
  62. ############################################################
  63. # parse_timestamp
  64. # Re-parses the raw data for a given timestamp
  65. ############################################################
  66. def parse_timestamp(self):
  67. all_tests = self.__gather_tests
  68. for test in all_tests:
  69. test.parse_all()
  70. self.__parse_results(all_tests)
  71. self.__finish()
  72. ############################################################
  73. # End parse_timestamp
  74. ############################################################
  75. ############################################################
  76. # Run the tests:
  77. # This process involves setting up the client/server machines
  78. # with any necessary change. Then going through each test,
  79. # running their setup script, verifying the URLs, and
  80. # running benchmarks against them.
  81. ############################################################
  82. def run(self):
  83. ##########################
  84. # Get a list of all known
  85. # tests that we can run.
  86. ##########################
  87. all_tests = self.__gather_tests
  88. ##########################
  89. # Setup client/server
  90. ##########################
  91. print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
  92. self.__setup_server()
  93. self.__setup_database()
  94. self.__setup_client()
  95. ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
  96. #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
  97. # raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
  98. ##########################
  99. # Run tests
  100. ##########################
  101. print header("Running Tests...", top='=', bottom='=')
  102. result = self.__run_tests(all_tests)
  103. ##########################
  104. # Parse results
  105. ##########################
  106. if self.mode == "benchmark":
  107. print header("Parsing Results ...", top='=', bottom='=')
  108. self.__parse_results(all_tests)
  109. self.__finish()
  110. return result
  111. ############################################################
  112. # End run
  113. ############################################################
  114. ############################################################
  115. # database_sftp_string(batch_file)
  116. # generates a fully qualified URL for sftp to database
  117. ############################################################
  118. def database_sftp_string(self, batch_file):
  119. sftp_string = "sftp -oStrictHostKeyChecking=no "
  120. if batch_file != None: sftp_string += " -b " + batch_file + " "
  121. if self.database_identity_file != None:
  122. sftp_string += " -i " + self.database_identity_file + " "
  123. return sftp_string + self.database_user + "@" + self.database_host
  124. ############################################################
  125. # End database_sftp_string
  126. ############################################################
  127. ############################################################
  128. # client_sftp_string(batch_file)
  129. # generates a fully qualified URL for sftp to client
  130. ############################################################
  131. def client_sftp_string(self, batch_file):
  132. sftp_string = "sftp -oStrictHostKeyChecking=no "
  133. if batch_file != None: sftp_string += " -b " + batch_file + " "
  134. if self.client_identity_file != None:
  135. sftp_string += " -i " + self.client_identity_file + " "
  136. return sftp_string + self.client_user + "@" + self.client_host
  137. ############################################################
  138. # End client_sftp_string
  139. ############################################################
  140. ############################################################
  141. # generate_url(url, port)
  142. # generates a fully qualified URL for accessing a test url
  143. ############################################################
  144. def generate_url(self, url, port):
  145. return self.server_host + ":" + str(port) + url
  146. ############################################################
  147. # End generate_url
  148. ############################################################
  149. ############################################################
  150. # get_output_file(test_name, test_type)
  151. # returns the output file name for this test_name and
  152. # test_type timestamp/test_type/test_name/raw
  153. ############################################################
  154. def get_output_file(self, test_name, test_type):
  155. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  156. ############################################################
  157. # End get_output_file
  158. ############################################################
  159. ############################################################
  160. # output_file(test_name, test_type)
  161. # returns the output file for this test_name and test_type
  162. # timestamp/test_type/test_name/raw
  163. ############################################################
  164. def output_file(self, test_name, test_type):
  165. path = self.get_output_file(test_name, test_type)
  166. try:
  167. os.makedirs(os.path.dirname(path))
  168. except OSError:
  169. pass
  170. return path
  171. ############################################################
  172. # End output_file
  173. ############################################################
  174. ############################################################
  175. # get_stats_file(test_name, test_type)
  176. # returns the stats file name for this test_name and
  177. # test_type timestamp/test_type/test_name/raw
  178. ############################################################
  179. def get_stats_file(self, test_name, test_type):
  180. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  181. ############################################################
  182. # End get_stats_file
  183. ############################################################
  184. ############################################################
  185. # stats_file(test_name, test_type)
  186. # returns the stats file for this test_name and test_type
  187. # timestamp/test_type/test_name/raw
  188. ############################################################
  189. def stats_file(self, test_name, test_type):
  190. path = self.get_stats_file(test_name, test_type)
  191. try:
  192. os.makedirs(os.path.dirname(path))
  193. except OSError:
  194. pass
  195. return path
  196. ############################################################
  197. # End stats_file
  198. ############################################################
  199. ############################################################
  200. # full_results_directory
  201. ############################################################
  202. def full_results_directory(self):
  203. path = os.path.join(self.result_directory, self.timestamp)
  204. try:
  205. os.makedirs(path)
  206. except OSError:
  207. pass
  208. return path
  209. ############################################################
  210. # End full_results_directory
  211. ############################################################
  212. ############################################################
  213. # Latest intermediate results dirctory
  214. ############################################################
  215. def latest_results_directory(self):
  216. path = os.path.join(self.result_directory,"latest")
  217. try:
  218. os.makedirs(path)
  219. except OSError:
  220. pass
  221. return path
  222. ############################################################
  223. # report_verify_results
  224. # Used by FrameworkTest to add verification details to our results
  225. #
  226. # TODO: Technically this is an IPC violation - we are accessing
  227. # the parent process' memory from the child process
  228. ############################################################
  229. def report_verify_results(self, framework, test, result):
  230. if framework.name not in self.results['verify'].keys():
  231. self.results['verify'][framework.name] = dict()
  232. self.results['verify'][framework.name][test] = result
  233. ############################################################
  234. # report_benchmark_results
  235. # Used by FrameworkTest to add benchmark data to this
  236. #
  237. # TODO: Technically this is an IPC violation - we are accessing
  238. # the parent process' memory from the child process
  239. ############################################################
  240. def report_benchmark_results(self, framework, test, results):
  241. if test not in self.results['rawData'].keys():
  242. self.results['rawData'][test] = dict()
  243. # If results has a size from the parse, then it succeeded.
  244. if results:
  245. self.results['rawData'][test][framework.name] = results
  246. # This may already be set for single-tests
  247. if framework.name not in self.results['succeeded'][test]:
  248. self.results['succeeded'][test].append(framework.name)
  249. else:
  250. # This may already be set for single-tests
  251. if framework.name not in self.results['failed'][test]:
  252. self.results['failed'][test].append(framework.name)
  253. ############################################################
  254. # End report_results
  255. ############################################################
  256. ##########################################################################################
  257. # Private methods
  258. ##########################################################################################
  259. ############################################################
  260. # Gathers all the tests
  261. ############################################################
  262. @property
  263. def __gather_tests(self):
  264. tests = gather_tests(include=self.test,
  265. exclude=self.exclude,
  266. benchmarker=self)
  267. # If the tests have been interrupted somehow, then we want to resume them where we left
  268. # off, rather than starting from the beginning
  269. if os.path.isfile('current_benchmark.txt'):
  270. with open('current_benchmark.txt', 'r') as interrupted_benchmark:
  271. interrupt_bench = interrupted_benchmark.read()
  272. for index, atest in enumerate(tests):
  273. if atest.name == interrupt_bench:
  274. tests = tests[index:]
  275. break
  276. return tests
  277. ############################################################
  278. # End __gather_tests
  279. ############################################################
  280. ############################################################
  281. # Gathers all the frameworks
  282. ############################################################
  283. def __gather_frameworks(self):
  284. frameworks = []
  285. # Loop through each directory (we assume we're being run from the benchmarking root)
  286. for dirname, dirnames, filenames in os.walk('.'):
  287. # Look for the benchmark_config file, this will contain our framework name
  288. # It's format looks like this:
  289. #
  290. # {
  291. # "framework": "nodejs",
  292. # "tests": [{
  293. # "default": {
  294. # "setup_file": "setup",
  295. # "json_url": "/json"
  296. # },
  297. # "mysql": {
  298. # "setup_file": "setup",
  299. # "db_url": "/mysql",
  300. # "query_url": "/mysql?queries="
  301. # },
  302. # ...
  303. # }]
  304. # }
  305. if 'benchmark_config' in filenames:
  306. config = None
  307. with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
  308. # Load json file into config object
  309. config = json.load(config_file)
  310. if config == None:
  311. continue
  312. frameworks.append(str(config['framework']))
  313. return frameworks
  314. ############################################################
  315. # End __gather_frameworks
  316. ############################################################
  317. ############################################################
  318. # Makes any necessary changes to the server that should be
  319. # made before running the tests. This involves setting kernal
  320. # settings to allow for more connections, or more file
  321. # descriptiors
  322. #
  323. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  324. ############################################################
  325. def __setup_server(self):
  326. try:
  327. if os.name == 'nt':
  328. return True
  329. subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  330. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  331. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  332. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  333. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  334. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  335. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  336. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  337. except subprocess.CalledProcessError:
  338. return False
  339. ############################################################
  340. # End __setup_server
  341. ############################################################
  342. ############################################################
  343. # Makes any necessary changes to the database machine that
  344. # should be made before running the tests. Is very similar
  345. # to the server setup, but may also include database specific
  346. # changes.
  347. ############################################################
  348. def __setup_database(self):
  349. p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
  350. p.communicate("""
  351. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  352. sudo sysctl -w net.core.somaxconn=65535
  353. sudo -s ulimit -n 65535
  354. sudo sysctl net.ipv4.tcp_tw_reuse=1
  355. sudo sysctl net.ipv4.tcp_tw_recycle=1
  356. sudo sysctl -w kernel.shmmax=2147483648
  357. sudo sysctl -w kernel.shmall=2097152
  358. """)
  359. ############################################################
  360. # End __setup_database
  361. ############################################################
  362. ############################################################
  363. # Makes any necessary changes to the client machine that
  364. # should be made before running the tests. Is very similar
  365. # to the server setup, but may also include client specific
  366. # changes.
  367. ############################################################
  368. def __setup_client(self):
  369. p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
  370. p.communicate("""
  371. sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
  372. sudo sysctl -w net.core.somaxconn=65535
  373. sudo -s ulimit -n 65535
  374. sudo sysctl net.ipv4.tcp_tw_reuse=1
  375. sudo sysctl net.ipv4.tcp_tw_recycle=1
  376. sudo sysctl -w kernel.shmmax=2147483648
  377. sudo sysctl -w kernel.shmall=2097152
  378. """)
  379. ############################################################
  380. # End __setup_client
  381. ############################################################
  382. ############################################################
  383. # __run_tests
  384. #
  385. # 2013-10-02 ASB Calls each test passed in tests to
  386. # __run_test in a separate process. Each
  387. # test is given a set amount of time and if
  388. # kills the child process (and subsequently
  389. # all of its child processes). Uses
  390. # multiprocessing module.
  391. ############################################################
  392. def __run_tests(self, tests):
  393. logging.debug("Start __run_tests.")
  394. logging.debug("__name__ = %s",__name__)
  395. error_happened = False
  396. if self.os.lower() == 'windows':
  397. logging.debug("Executing __run_tests on Windows")
  398. for test in tests:
  399. with open('current_benchmark.txt', 'w') as benchmark_resume_file:
  400. benchmark_resume_file.write(test.name)
  401. if self.__run_test(test) != 0:
  402. error_happened = True
  403. else:
  404. logging.debug("Executing __run_tests on Linux")
  405. # These features do not work on Windows
  406. for test in tests:
  407. if __name__ == 'benchmark.benchmarker':
  408. print header("Running Test: %s" % test.name)
  409. with open('current_benchmark.txt', 'w') as benchmark_resume_file:
  410. benchmark_resume_file.write(test.name)
  411. test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
  412. test_process.start()
  413. test_process.join(self.run_test_timeout_seconds)
  414. self.__load_results() # Load intermediate result from child process
  415. if(test_process.is_alive()):
  416. logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
  417. self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
  418. test_process.terminate()
  419. test_process.join()
  420. if test_process.exitcode != 0:
  421. error_happened = True
  422. if os.path.isfile('current_benchmark.txt'):
  423. os.remove('current_benchmark.txt')
  424. logging.debug("End __run_tests.")
  425. if error_happened:
  426. return 1
  427. return 0
  428. ############################################################
  429. # End __run_tests
  430. ############################################################
  431. ############################################################
  432. # __run_test
  433. # 2013-10-02 ASB Previously __run_tests. This code now only
  434. # processes a single test.
  435. #
  436. # Ensures that the system has all necessary software to run
  437. # the tests. This does not include that software for the individual
  438. # test, but covers software such as curl and weighttp that
  439. # are needed.
  440. ############################################################
  441. def __run_test(self, test):
  442. # Used to capture return values
  443. def exit_with_code(code):
  444. if self.os.lower() == 'windows':
  445. return code
  446. else:
  447. sys.exit(code)
  448. try:
  449. os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
  450. except:
  451. pass
  452. with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
  453. open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
  454. if hasattr(test, 'skip'):
  455. if test.skip.lower() == "true":
  456. out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
  457. return exit_with_code(0)
  458. if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
  459. # the operating system requirements of this test for the
  460. # application server or the database server don't match
  461. # our current environment
  462. out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
  463. return exit_with_code(0)
  464. # If the test is in the excludes list, we skip it
  465. if self.exclude != None and test.name in self.exclude:
  466. out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
  467. return exit_with_code(0)
  468. # If the test does not contain an implementation of the current test-type, skip it
  469. if self.type != 'all' and not test.contains_type(self.type):
  470. out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
  471. return exit_with_code(0)
  472. out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
  473. out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
  474. out.write("test.name: {name}\n".format(name=str(test.name)))
  475. out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
  476. if self.results['frameworks'] != None and test.name in self.results['completed']:
  477. out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
  478. return exit_with_code(1)
  479. out.flush()
  480. out.write(header("Beginning %s" % test.name, top='='))
  481. out.flush()
  482. ##########################
  483. # Start this test
  484. ##########################
  485. out.write(header("Starting %s" % test.name))
  486. out.flush()
  487. try:
  488. if test.requires_database():
  489. p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
  490. p.communicate("""
  491. sudo restart mysql
  492. sudo restart mongodb
  493. sudo service redis-server restart
  494. sudo /etc/init.d/postgresql restart
  495. """)
  496. time.sleep(10)
  497. if self.__is_port_bound(test.port):
  498. self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
  499. err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
  500. err.flush()
  501. return exit_with_code(1)
  502. result = test.start(out, err)
  503. if result != 0:
  504. test.stop(out, err)
  505. time.sleep(5)
  506. err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
  507. err.write(header("Stopped %s" % test.name))
  508. err.flush()
  509. self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
  510. return exit_with_code(1)
  511. time.sleep(self.sleep)
  512. ##########################
  513. # Verify URLs
  514. ##########################
  515. passed_verify = test.verify_urls(out, err)
  516. out.flush()
  517. err.flush()
  518. ##########################
  519. # Benchmark this test
  520. ##########################
  521. if self.mode == "benchmark":
  522. out.write(header("Benchmarking %s" % test.name))
  523. out.flush()
  524. test.benchmark(out, err)
  525. out.flush()
  526. err.flush()
  527. ##########################
  528. # Stop this test
  529. ##########################
  530. out.write(header("Stopping %s" % test.name))
  531. out.flush()
  532. test.stop(out, err)
  533. out.flush()
  534. err.flush()
  535. time.sleep(5)
  536. if self.__is_port_bound(test.port):
  537. self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
  538. err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
  539. err.flush()
  540. return exit_with_code(1)
  541. out.write(header("Stopped %s" % test.name))
  542. out.flush()
  543. time.sleep(5)
  544. ##########################################################
  545. # Save results thus far into toolset/benchmark/latest.json
  546. ##########################################################
  547. out.write(header("Saving results through %s" % test.name))
  548. out.flush()
  549. self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
  550. if self.mode == "verify" and not passed_verify:
  551. print "Failed verify!"
  552. return exit_with_code(1)
  553. except (OSError, IOError, subprocess.CalledProcessError) as e:
  554. self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
  555. err.write(header("Subprocess Error %s" % test.name))
  556. traceback.print_exc(file=err)
  557. err.flush()
  558. try:
  559. test.stop(out, err)
  560. except (subprocess.CalledProcessError) as e:
  561. self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
  562. err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
  563. traceback.print_exc(file=err)
  564. err.flush()
  565. out.close()
  566. err.close()
  567. return exit_with_code(1)
  568. # TODO - subprocess should not catch this exception!
  569. # Parent process should catch it and cleanup/exit
  570. except (KeyboardInterrupt) as e:
  571. test.stop(out, err)
  572. out.write(header("Cleaning up..."))
  573. out.flush()
  574. self.__finish()
  575. sys.exit(1)
  576. out.close()
  577. err.close()
  578. return exit_with_code(0)
  579. ############################################################
  580. # End __run_tests
  581. ############################################################
  582. ############################################################
  583. # __is_port_bound
  584. # Check if the requested port is available. If it
  585. # isn't available, then a previous test probably didn't
  586. # shutdown properly.
  587. ############################################################
  588. def __is_port_bound(self, port):
  589. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  590. try:
  591. # Try to bind to all IP addresses, this port
  592. s.bind(("", port))
  593. # If we get here, we were able to bind successfully,
  594. # which means the port is free.
  595. except:
  596. # If we get an exception, it might be because the port is still bound
  597. # which would be bad, or maybe it is a privileged port (<1024) and we
  598. # are not running as root, or maybe the server is gone, but sockets are
  599. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  600. # connect.
  601. try:
  602. s.connect(("127.0.0.1", port))
  603. # If we get here, we were able to connect to something, which means
  604. # that the port is still bound.
  605. return True
  606. except:
  607. # An exception means that we couldn't connect, so a server probably
  608. # isn't still running on the port.
  609. pass
  610. finally:
  611. s.close()
  612. return False
  613. ############################################################
  614. # End __is_port_bound
  615. ############################################################
  616. ############################################################
  617. # __parse_results
  618. # Ensures that the system has all necessary software to run
  619. # the tests. This does not include that software for the individual
  620. # test, but covers software such as curl and weighttp that
  621. # are needed.
  622. ############################################################
  623. def __parse_results(self, tests):
  624. # Run the method to get the commmit count of each framework.
  625. self.__count_commits()
  626. # Call the method which counts the sloc for each framework
  627. self.__count_sloc()
  628. # Time to create parsed files
  629. # Aggregate JSON file
  630. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  631. f.write(json.dumps(self.results))
  632. ############################################################
  633. # End __parse_results
  634. ############################################################
  635. #############################################################
  636. # __count_sloc
  637. # This is assumed to be run from the benchmark root directory
  638. #############################################################
  639. def __count_sloc(self):
  640. all_frameworks = self.__gather_frameworks()
  641. jsonResult = {}
  642. for framework in all_frameworks:
  643. try:
  644. command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
  645. lineCount = subprocess.check_output(command, shell=True)
  646. # Find the last instance of the word 'code' in the yaml output. This should
  647. # be the line count for the sum of all listed files or just the line count
  648. # for the last file in the case where there's only one file listed.
  649. lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
  650. lineCount = lineCount.strip('code: ')
  651. lineCount = lineCount[0:lineCount.rfind('comment')]
  652. jsonResult[framework['name']] = int(lineCount)
  653. except:
  654. continue
  655. self.results['rawData']['slocCounts'] = jsonResult
  656. ############################################################
  657. # End __count_sloc
  658. ############################################################
  659. ############################################################
  660. # __count_commits
  661. ############################################################
  662. def __count_commits(self):
  663. all_frameworks = self.__gather_frameworks()
  664. jsonResult = {}
  665. for framework in all_frameworks:
  666. try:
  667. command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
  668. commitCount = subprocess.check_output(command, shell=True)
  669. jsonResult[framework] = int(commitCount)
  670. except:
  671. continue
  672. self.results['rawData']['commitCounts'] = jsonResult
  673. self.commits = jsonResult
  674. ############################################################
  675. # End __count_commits
  676. ############################################################
  677. ############################################################
  678. # __write_intermediate_results
  679. ############################################################
  680. def __write_intermediate_results(self,test_name,status_message):
  681. try:
  682. self.results["completed"][test_name] = status_message
  683. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  684. f.write(json.dumps(self.results))
  685. except (IOError):
  686. logging.error("Error writing results.json")
  687. ############################################################
  688. # End __write_intermediate_results
  689. ############################################################
  690. def __load_results(self):
  691. try:
  692. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  693. self.results = json.load(f)
  694. except (ValueError, IOError):
  695. pass
  696. ############################################################
  697. # __finish
  698. ############################################################
def __finish(self):
    """Print the colorized verification summary and run totals to stdout."""
    # NOTE(review): __gather_tests is accessed without parentheses here,
    # while __init__ iterates it the same way — presumably a property;
    # confirm against its definition.
    tests = self.__gather_tests
    # Normally you don't have to use Fore.BLUE before each line, but
    # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
    # or stream flush, so we have to ensure that the color code is printed repeatedly
    prefix = Fore.CYAN
    for line in header("Verification Summary", top='=', bottom='').split('\n'):
        print prefix + line
    for test in tests:
        print prefix + "| Test: %s" % test.name
        if test.name in self.results['verify'].keys():
            # One line per test type, colored by its verification outcome:
            # green PASS, yellow WARN, red anything else.
            for test_type, result in self.results['verify'][test.name].iteritems():
                if result.upper() == "PASS":
                    color = Fore.GREEN
                elif result.upper() == "WARN":
                    color = Fore.YELLOW
                else:
                    color = Fore.RED
                print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
        else:
            # The test never produced a verify entry at all.
            print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
    print prefix + header('', top='', bottom='=') + Style.RESET_ALL
    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  723. ############################################################
  724. # End __finish
  725. ############################################################
  726. ##########################################################################################
  727. # Constructor
  728. ##########################################################################################
  729. ############################################################
  730. # Initialize the benchmarker. The args are the arguments
  731. # parsed via argparser.
  732. ############################################################
def __init__(self, args):
    """Initialize the benchmarker from arguments parsed via argparser.

    `args` is a dict; every entry becomes an instance attribute via
    __dict__.update, so later attribute reads (database_user, parse,
    max_concurrency, install, ...) come from the command line.
    """
    self.__dict__.update(args)
    # Wall-clock start time; __finish reports the total duration from it.
    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    # setup some additional variables: database credentials fall back to
    # the client credentials when not given explicitly
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
    # Remember root directory
    self.fwroot = setup_util.get_fwroot()
    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    # NOTE(review): this replaces the bound method with its return value,
    # so latest_results_directory() can only ever be invoked once.
    self.latest_results_directory = self.latest_results_directory()
    # When re-parsing an existing run, reuse that run's timestamp;
    # otherwise stamp this run with the current local time.
    if self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
        self.concurrency_levels.append(concurrency)
        concurrency = concurrency * 2
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
        self.query_intervals.append(queries)
        if queries == 1:
            # Reset to 0 after the initial 1 so subsequent entries are
            # exact multiples of query_interval.
            queries = 0
        queries = queries + self.query_interval
    # (Removed: a long-dead commented-out block that loaded
    # toolset/benchmark/latest.json into self.latest and seeded
    # self.results from it.)
    # Load the previous run's results so this run can extend them.
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            # Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test %s not found.",self.name)
    if self.results == None:
        # No previous results: build a fresh results skeleton with one
        # rawData/succeeded/failed bucket per test type.
        self.results = dict()
        self.results['name'] = self.name
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_intervals
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['verify'] = dict()
    else:
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
    if self.install is not None:
        # Install server/database/client software up front when requested.
        install = Installer(self, self.install_strategy)
        install.install_software()
  840. ############################################################
  841. # End __init__
  842. ############################################################