benchmarker.py 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997
  1. from setup.linux.installer import Installer
  2. from setup.linux import setup_util
  3. from benchmark import framework_test
  4. from utils import header
  5. import os
  6. import json
  7. import subprocess
  8. import traceback
  9. import time
  10. import pprint
  11. import csv
  12. import sys
  13. import logging
  14. import socket
  15. import glob
  16. from multiprocessing import Process
  17. from datetime import datetime
  18. class Benchmarker:
  19. ##########################################################################################
  20. # Public methods
  21. ##########################################################################################
  22. ############################################################
  23. # Prints all the available tests
  24. ############################################################
  25. def run_list_tests(self):
  26. all_tests = self.__gather_tests
  27. for test in all_tests:
  28. print test.name
  29. self.__finish()
  30. ############################################################
  31. # End run_list_tests
  32. ############################################################
  33. ############################################################
  34. # Prints the metadata for all the available tests
  35. ############################################################
  36. def run_list_test_metadata(self):
  37. all_tests = self.__gather_tests
  38. all_tests_json = json.dumps(map(lambda test: {
  39. "name": test.name,
  40. "approach": test.approach,
  41. "classification": test.classification,
  42. "database": test.database,
  43. "framework": test.framework,
  44. "language": test.language,
  45. "orm": test.orm,
  46. "platform": test.platform,
  47. "webserver": test.webserver,
  48. "os": test.os,
  49. "database_os": test.database_os,
  50. "display_name": test.display_name,
  51. "notes": test.notes,
  52. "versus": test.versus
  53. }, all_tests))
  54. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  55. f.write(all_tests_json)
  56. self.__finish()
  57. ############################################################
  58. # End run_list_test_metadata
  59. ############################################################
  60. ############################################################
  61. # parse_timestamp
  62. # Re-parses the raw data for a given timestamp
  63. ############################################################
  64. def parse_timestamp(self):
  65. all_tests = self.__gather_tests
  66. for test in all_tests:
  67. test.parse_all()
  68. self.__parse_results(all_tests)
  69. self.__finish()
  70. ############################################################
  71. # End parse_timestamp
  72. ############################################################
  73. ############################################################
  74. # Run the tests:
  75. # This process involves setting up the client/server machines
  76. # with any necessary change. Then going through each test,
  77. # running their setup script, verifying the URLs, and
  78. # running benchmarks against them.
  79. ############################################################
  80. def run(self):
  81. ##########################
  82. # Get a list of all known
  83. # tests that we can run.
  84. ##########################
  85. all_tests = self.__gather_tests
  86. ##########################
  87. # Setup client/server
  88. ##########################
  89. print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
  90. self.__setup_server()
  91. self.__setup_database()
  92. self.__setup_client()
  93. ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
  94. #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
  95. # raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
  96. ##########################
  97. # Run tests
  98. ##########################
  99. print header("Running Tests...", top='=', bottom='=')
  100. result = self.__run_tests(all_tests)
  101. ##########################
  102. # Parse results
  103. ##########################
  104. if self.mode == "benchmark":
  105. print header("Parsing Results ...", top='=', bottom='=')
  106. self.__parse_results(all_tests)
  107. self.__finish()
  108. return result
  109. ############################################################
  110. # End run
  111. ############################################################
  112. ############################################################
  113. # database_sftp_string(batch_file)
  114. # generates a fully qualified URL for sftp to database
  115. ############################################################
  116. def database_sftp_string(self, batch_file):
  117. sftp_string = "sftp -oStrictHostKeyChecking=no "
  118. if batch_file != None: sftp_string += " -b " + batch_file + " "
  119. if self.database_identity_file != None:
  120. sftp_string += " -i " + self.database_identity_file + " "
  121. return sftp_string + self.database_user + "@" + self.database_host
  122. ############################################################
  123. # End database_sftp_string
  124. ############################################################
  125. ############################################################
  126. # client_sftp_string(batch_file)
  127. # generates a fully qualified URL for sftp to client
  128. ############################################################
  129. def client_sftp_string(self, batch_file):
  130. sftp_string = "sftp -oStrictHostKeyChecking=no "
  131. if batch_file != None: sftp_string += " -b " + batch_file + " "
  132. if self.client_identity_file != None:
  133. sftp_string += " -i " + self.client_identity_file + " "
  134. return sftp_string + self.client_user + "@" + self.client_host
  135. ############################################################
  136. # End client_sftp_string
  137. ############################################################
  138. ############################################################
  139. # generate_url(url, port)
  140. # generates a fully qualified URL for accessing a test url
  141. ############################################################
  142. def generate_url(self, url, port):
  143. return self.server_host + ":" + str(port) + url
  144. ############################################################
  145. # End generate_url
  146. ############################################################
  147. ############################################################
  148. # get_output_file(test_name, test_type)
  149. # returns the output file name for this test_name and
  150. # test_type timestamp/test_type/test_name/raw
  151. ############################################################
  152. def get_output_file(self, test_name, test_type):
  153. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  154. ############################################################
  155. # End get_output_file
  156. ############################################################
  157. ############################################################
  158. # output_file(test_name, test_type)
  159. # returns the output file for this test_name and test_type
  160. # timestamp/test_type/test_name/raw
  161. ############################################################
  162. def output_file(self, test_name, test_type):
  163. path = self.get_output_file(test_name, test_type)
  164. try:
  165. os.makedirs(os.path.dirname(path))
  166. except OSError:
  167. pass
  168. return path
  169. ############################################################
  170. # End output_file
  171. ############################################################
  172. ############################################################
  173. # get_warning_file(test_name, test_type)
  174. # returns the output file name for this test_name and
  175. # test_type timestamp/test_type/test_name/raw
  176. ############################################################
  177. def get_warning_file(self, test_name, test_type):
  178. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
  179. ############################################################
  180. # End get_warning_file
  181. ############################################################
  182. ############################################################
  183. # warning_file(test_name, test_type)
  184. # returns the warning file for this test_name and test_type
  185. # timestamp/test_type/test_name/raw
  186. ############################################################
  187. def warning_file(self, test_name, test_type):
  188. path = self.get_warning_file(test_name, test_type)
  189. try:
  190. os.makedirs(os.path.dirname(path))
  191. except OSError:
  192. pass
  193. return path
  194. ############################################################
  195. # End warning_file
  196. ############################################################
  197. ############################################################
  198. # get_stats_file(test_name, test_type)
  199. # returns the stats file name for this test_name and
  200. # test_type timestamp/test_type/test_name/raw
  201. ############################################################
  202. def get_stats_file(self, test_name, test_type):
  203. return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  204. ############################################################
  205. # End get_stats_file
  206. ############################################################
  207. ############################################################
  208. # stats_file(test_name, test_type)
  209. # returns the stats file for this test_name and test_type
  210. # timestamp/test_type/test_name/raw
  211. ############################################################
  212. def stats_file(self, test_name, test_type):
  213. path = self.get_stats_file(test_name, test_type)
  214. try:
  215. os.makedirs(os.path.dirname(path))
  216. except OSError:
  217. pass
  218. return path
  219. ############################################################
  220. # End stats_file
  221. ############################################################
  222. ############################################################
  223. # full_results_directory
  224. ############################################################
  225. def full_results_directory(self):
  226. path = os.path.join(self.result_directory, self.timestamp)
  227. try:
  228. os.makedirs(path)
  229. except OSError:
  230. pass
  231. return path
  232. ############################################################
  233. # End full_results_directory
  234. ############################################################
  235. ############################################################
  236. # Latest intermediate results dirctory
  237. ############################################################
  238. def latest_results_directory(self):
  239. path = os.path.join(self.result_directory,"latest")
  240. try:
  241. os.makedirs(path)
  242. except OSError:
  243. pass
  244. return path
  245. ############################################################
  246. # report_results
  247. ############################################################
  248. def report_results(self, framework, test, results):
  249. if test not in self.results['rawData'].keys():
  250. self.results['rawData'][test] = dict()
  251. # If results has a size from the parse, then it succeeded.
  252. if results:
  253. self.results['rawData'][test][framework.name] = results
  254. # This may already be set for single-tests
  255. if framework.name not in self.results['succeeded'][test]:
  256. self.results['succeeded'][test].append(framework.name)
  257. # Add this type
  258. if (os.path.exists(self.get_warning_file(framework.name, test)) and
  259. framework.name not in self.results['warning'][test]):
  260. self.results['warning'][test].append(framework.name)
  261. else:
  262. # This may already be set for single-tests
  263. if framework.name not in self.results['failed'][test]:
  264. self.results['failed'][test].append(framework.name)
  265. ############################################################
  266. # End report_results
  267. ############################################################
    ##########################################################################################
    # Private methods
    ##########################################################################################
    ############################################################
    # Gathers all the tests
    ############################################################
    @property
    def __gather_tests(self):
        """Discover every runnable test under the current directory.

        Scans */benchmark_config (relative to the CWD, assumed to be the
        FrameworkBenchmarks root), parses each one via
        framework_test.parse_config, filters by self.test when the user
        asked for specific tests, sorts by name, and -- if a
        current_benchmark.txt marker exists -- drops every test before the
        one that was interrupted so the run resumes where it left off.
        Returns the list of test objects.
        """
        tests = []
        # Assume we are running from FrameworkBenchmarks
        config_files = glob.glob('*/benchmark_config')
        for config_file_name in config_files:
            # Look for the benchmark_config file, this will set up our tests.
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            config = None
            with open(config_file_name, 'r') as config_file:
                # Load json file into config object
                try:
                    config = json.load(config_file)
                except:
                    # Bare except is deliberate here: it only adds context
                    # before re-raising, so nothing is swallowed.
                    print("Error loading '%s'." % config_file_name)
                    raise
            if config is None:
                continue
            # parse_config returns a LIST of tests for this one config.
            test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
            # If the user specified which tests to run, then
            # we can skip over tests that are not in that list
            if self.test == None:
                tests = tests + test
            else:
                for atest in test:
                    if atest.name in self.test:
                        tests.append(atest)
        tests.sort(key=lambda x: x.name)
        # If the tests have been interrupted somehow, then we want to resume them where we left
        # off, rather than starting from the beginning
        if os.path.isfile('current_benchmark.txt'):
            with open('current_benchmark.txt', 'r') as interrupted_benchmark:
                interrupt_bench = interrupted_benchmark.read()
                for index, atest in enumerate(tests):
                    if atest.name == interrupt_bench:
                        # Keep the interrupted test and everything after it.
                        tests = tests[index:]
                        break
        return tests
    ############################################################
    # End __gather_tests
    ############################################################
  331. ############################################################
  332. # Gathers all the frameworks
  333. ############################################################
  334. def __gather_frameworks(self):
  335. frameworks = []
  336. # Loop through each directory (we assume we're being run from the benchmarking root)
  337. for dirname, dirnames, filenames in os.walk('.'):
  338. # Look for the benchmark_config file, this will contain our framework name
  339. # It's format looks like this:
  340. #
  341. # {
  342. # "framework": "nodejs",
  343. # "tests": [{
  344. # "default": {
  345. # "setup_file": "setup",
  346. # "json_url": "/json"
  347. # },
  348. # "mysql": {
  349. # "setup_file": "setup",
  350. # "db_url": "/mysql",
  351. # "query_url": "/mysql?queries="
  352. # },
  353. # ...
  354. # }]
  355. # }
  356. if 'benchmark_config' in filenames:
  357. config = None
  358. with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
  359. # Load json file into config object
  360. config = json.load(config_file)
  361. if config == None:
  362. continue
  363. frameworks.append(str(config['framework']))
  364. return frameworks
  365. ############################################################
  366. # End __gather_frameworks
  367. ############################################################
  368. ############################################################
  369. # Makes any necessary changes to the server that should be
  370. # made before running the tests. This involves setting kernal
  371. # settings to allow for more connections, or more file
  372. # descriptiors
  373. #
  374. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  375. ############################################################
  376. def __setup_server(self):
  377. try:
  378. if os.name == 'nt':
  379. return True
  380. subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  381. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  382. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  383. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  384. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  385. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  386. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  387. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  388. except subprocess.CalledProcessError:
  389. return False
  390. ############################################################
  391. # End __setup_server
  392. ############################################################
    ############################################################
    # Makes any necessary changes to the database machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include database specific
    # changes.
    ############################################################
    def __setup_database(self):
        # Apply the same kernel/network tuning as the app server, but on the
        # database machine: the commands are piped over ssh via stdin.
        # Note: any failure of the remote commands is silently ignored
        # (communicate() only waits for the shell to finish).
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
        sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
        sudo sysctl -w net.core.somaxconn=65535
        sudo -s ulimit -n 65535
        sudo sysctl net.ipv4.tcp_tw_reuse=1
        sudo sysctl net.ipv4.tcp_tw_recycle=1
        sudo sysctl -w kernel.shmmax=2147483648
        sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_database
    ############################################################
    ############################################################
    # Makes any necessary changes to the client machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include client specific
    # changes.
    ############################################################
    def __setup_client(self):
        # Same tuning as __setup_database, targeted at the load-generating
        # client machine; commands are piped over ssh via stdin.
        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
        sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
        sudo sysctl -w net.core.somaxconn=65535
        sudo -s ulimit -n 65535
        sudo sysctl net.ipv4.tcp_tw_reuse=1
        sudo sysctl net.ipv4.tcp_tw_recycle=1
        sudo sysctl -w kernel.shmmax=2147483648
        sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_client
    ############################################################
    ############################################################
    # __run_tests
    #
    # 2013-10-02 ASB  Calls each test passed in tests to
    #                 __run_test in a separate process.  Each
    #                 test is given a set amount of time and if
    #                 exceeded, kills the child process (and
    #                 subsequently all of its child processes).
    #                 Uses multiprocessing module.
    ############################################################
    def __run_tests(self, tests):
        """Run every test in `tests`, one at a time.

        On Windows each test runs in-process; elsewhere each test runs in
        its own multiprocessing.Process so it can be killed after
        self.run_test_timeout_seconds. Returns 1 if any test failed or
        timed out, 0 otherwise.
        """
        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s",__name__)
        error_happened = False
        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                # Record the test in progress so an interrupted run can
                # resume here (see __gather_tests).
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                if self.__run_test(test) != 0:
                    error_happened = True
        else:
            logging.debug("Executing __run_tests on Linux")
            # These features do not work on Windows
            for test in tests:
                if __name__ == 'benchmark.benchmarker':
                    print header("Running Test: %s" % test.name)
                # Record the test in progress for resume support.
                with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                    benchmark_resume_file.write(test.name)
                test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
                test_process.start()
                # Wait at most run_test_timeout_seconds for the child.
                test_process.join(self.run_test_timeout_seconds)
                self.__load_results()  # Load intermediate result from child process
                if(test_process.is_alive()):
                    # join() timed out: record the timeout, then kill the
                    # child and wait for it to die.
                    logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                    self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                    test_process.terminate()
                    test_process.join()
                if test_process.exitcode != 0:
                    error_happened = True
        # All tests finished (or failed): clear the resume marker.
        if os.path.isfile('current_benchmark.txt'):
            os.remove('current_benchmark.txt')
        logging.debug("End __run_tests.")
        if error_happened:
            return 1
        return 0
    ############################################################
    # End __run_tests
    ############################################################
    ############################################################
    # __run_test
    # 2013-10-02 ASB  Previously __run_tests.  This code now only
    #                 processes a single test.
    ############################################################
    def __run_test(self, test):
        """Start, verify, optionally benchmark, and stop a single test.

        Normally runs as a child process (see __run_tests), so success or
        failure is usually delivered via sys.exit rather than a return
        value. All progress is logged to per-test out.txt/err.txt files.
        """
        # Used to capture return values: Windows runs in-process and needs a
        # real return; elsewhere we are a child process and exit instead.
        def exit_with_code(code):
            if self.os.lower() == 'windows':
                return code
            else:
                sys.exit(code)
        # NOTE(review): self.latest_results_directory is referenced as an
        # attribute here although a method of the same name exists on this
        # class -- presumably __init__ shadows it with the computed path;
        # confirm against the constructor.
        try:
            os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
        except:
            pass
        with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
             open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
            # Skip if the benchmark_config explicitly asked us to.
            if hasattr(test, 'skip'):
                if test.skip.lower() == "true":
                    out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
                    return exit_with_code(0)
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                # the operating system requirements of this test for the
                # application server or the database server don't match
                # our current environment
                out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
                return exit_with_code(0)
            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return exit_with_code(0)
            # If the test does not contain an implementation of the current test-type, skip it
            if self.type != 'all' and not test.contains_type(self.type):
                out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
                return exit_with_code(0)
            # Debug breadcrumbs for the skip decisions above.
            out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            # Skip tests already completed in a previous (resumed) run.
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                return exit_with_code(1)
            out.flush()
            out.write(header("Beginning %s" % test.name, top='='))
            out.flush()
            ##########################
            # Start this test
            ##########################
            out.write(header("Starting %s" % test.name))
            out.flush()
            try:
                # Restart the database servers so each test starts clean.
                if test.requires_database():
                    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                    p.communicate("""
                    sudo restart mysql
                    sudo restart mongodb
                    sudo service redis-server restart
                    sudo /etc/init.d/postgresql restart
                    """)
                    time.sleep(10)
                # The port must be free before the server is started.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)
                result = test.start(out, err)
                if result != 0:
                    # Setup script reported failure: stop and record it.
                    test.stop(out, err)
                    time.sleep(5)
                    err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                    err.write(header("Stopped %s" % test.name))
                    err.flush()
                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                    return exit_with_code(1)
                # Give the server time to come up before hitting its URLs.
                time.sleep(self.sleep)
                ##########################
                # Verify URLs
                ##########################
                passed_verify = test.verify_urls(out, err)
                out.flush()
                err.flush()
                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    out.write(header("Benchmarking %s" % test.name))
                    out.flush()
                    test.benchmark(out, err)
                    out.flush()
                    err.flush()
                ##########################
                # Stop this test
                ##########################
                out.write(header("Stopping %s" % test.name))
                out.flush()
                test.stop(out, err)
                out.flush()
                err.flush()
                time.sleep(5)
                # A still-bound port means stop() failed to kill the server.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                    err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                    err.flush()
                    return exit_with_code(1)
                out.write(header("Stopped %s" % test.name))
                out.flush()
                time.sleep(5)
                ##########################################################
                # Save results thus far into toolset/benchmark/latest.json
                ##########################################################
                out.write(header("Saving results through %s" % test.name))
                out.flush()
                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
                if self.mode == "verify" and not passed_verify:
                    print "Failed verify!"
                    return exit_with_code(1)
            except (OSError, IOError, subprocess.CalledProcessError) as e:
                # Anything that went wrong while driving the test's setup
                # scripts: record it, try to stop the server, and bail.
                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                err.write(header("Subprocess Error %s" % test.name))
                traceback.print_exc(file=err)
                err.flush()
                try:
                    test.stop(out, err)
                except (subprocess.CalledProcessError) as e:
                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                    err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                    traceback.print_exc(file=err)
                    err.flush()
                out.close()
                err.close()
                return exit_with_code(1)
            # TODO - subprocess should not catch this exception!
            # Parent process should catch it and cleanup/exit
            except (KeyboardInterrupt) as e:
                test.stop(out, err)
                out.write(header("Cleaning up..."))
                out.flush()
                self.__finish()
                sys.exit(1)
            out.close()
            err.close()
            return exit_with_code(0)
    ############################################################
    # End __run_tests
    ############################################################
  633. ############################################################
  634. # __is_port_bound
  635. # Check if the requested port is available. If it
  636. # isn't available, then a previous test probably didn't
  637. # shutdown properly.
  638. ############################################################
  639. def __is_port_bound(self, port):
  640. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  641. try:
  642. # Try to bind to all IP addresses, this port
  643. s.bind(("", port))
  644. # If we get here, we were able to bind successfully,
  645. # which means the port is free.
  646. except:
  647. # If we get an exception, it might be because the port is still bound
  648. # which would be bad, or maybe it is a privileged port (<1024) and we
  649. # are not running as root, or maybe the server is gone, but sockets are
  650. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  651. # connect.
  652. try:
  653. s.connect(("127.0.0.1", port))
  654. # If we get here, we were able to connect to something, which means
  655. # that the port is still bound.
  656. return True
  657. except:
  658. # An exception means that we couldn't connect, so a server probably
  659. # isn't still running on the port.
  660. pass
  661. finally:
  662. s.close()
  663. return False
  664. ############################################################
  665. # End __is_port_bound
  666. ############################################################
############################################################
# __parse_results
# Parses and aggregates the results of the tests: gathers the
# commit count and source-line count for each framework, then
# writes the accumulated results dictionary out to the
# results.json file in the full results directory.
############################################################
  674. def __parse_results(self, tests):
  675. # Run the method to get the commmit count of each framework.
  676. self.__count_commits()
  677. # Call the method which counts the sloc for each framework
  678. self.__count_sloc()
  679. # Time to create parsed files
  680. # Aggregate JSON file
  681. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  682. f.write(json.dumps(self.results))
  683. ############################################################
  684. # End __parse_results
  685. ############################################################
  686. #############################################################
  687. # __count_sloc
  688. # This is assumed to be run from the benchmark root directory
  689. #############################################################
  690. def __count_sloc(self):
  691. all_frameworks = self.__gather_frameworks()
  692. jsonResult = {}
  693. for framework in all_frameworks:
  694. try:
  695. command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
  696. lineCount = subprocess.check_output(command, shell=True)
  697. # Find the last instance of the word 'code' in the yaml output. This should
  698. # be the line count for the sum of all listed files or just the line count
  699. # for the last file in the case where there's only one file listed.
  700. lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
  701. lineCount = lineCount.strip('code: ')
  702. lineCount = lineCount[0:lineCount.rfind('comment')]
  703. jsonResult[framework['name']] = int(lineCount)
  704. except:
  705. continue
  706. self.results['rawData']['slocCounts'] = jsonResult
  707. ############################################################
  708. # End __count_sloc
  709. ############################################################
  710. ############################################################
  711. # __count_commits
  712. ############################################################
  713. def __count_commits(self):
  714. all_frameworks = self.__gather_frameworks()
  715. jsonResult = {}
  716. for framework in all_frameworks:
  717. try:
  718. command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
  719. commitCount = subprocess.check_output(command, shell=True)
  720. jsonResult[framework] = int(commitCount)
  721. except:
  722. continue
  723. self.results['rawData']['commitCounts'] = jsonResult
  724. self.commits = jsonResult
  725. ############################################################
  726. # End __count_commits
  727. ############################################################
  728. ############################################################
  729. # __write_intermediate_results
  730. ############################################################
  731. def __write_intermediate_results(self,test_name,status_message):
  732. try:
  733. self.results["completed"][test_name] = status_message
  734. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  735. f.write(json.dumps(self.results))
  736. except (IOError):
  737. logging.error("Error writing results.json")
  738. ############################################################
  739. # End __write_intermediate_results
  740. ############################################################
  741. def __load_results(self):
  742. try:
  743. with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
  744. self.results = json.load(f)
  745. except (ValueError, IOError):
  746. pass
  747. ############################################################
  748. # __finish
  749. ############################################################
  750. def __finish(self):
  751. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  752. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  753. ############################################################
  754. # End __finish
  755. ############################################################
  756. ##########################################################################################
  757. # Constructor
  758. ##########################################################################################
  759. ############################################################
  760. # Initialize the benchmarker. The args are the arguments
  761. # parsed via argparser.
  762. ############################################################
  763. def __init__(self, args):
  764. self.__dict__.update(args)
  765. self.start_time = time.time()
  766. self.run_test_timeout_seconds = 3600
  767. # setup logging
  768. logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
  769. # setup some additional variables
  770. if self.database_user == None: self.database_user = self.client_user
  771. if self.database_host == None: self.database_host = self.client_host
  772. if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
  773. # Remember root directory
  774. self.fwroot = setup_util.get_fwroot()
  775. # setup results and latest_results directories
  776. self.result_directory = os.path.join("results", self.name)
  777. self.latest_results_directory = self.latest_results_directory()
  778. if self.parse != None:
  779. self.timestamp = self.parse
  780. else:
  781. self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
  782. # Setup the concurrency levels array. This array goes from
  783. # starting_concurrency to max concurrency, doubling each time
  784. self.concurrency_levels = []
  785. concurrency = self.starting_concurrency
  786. while concurrency <= self.max_concurrency:
  787. self.concurrency_levels.append(concurrency)
  788. concurrency = concurrency * 2
  789. # Setup query interval array
  790. # starts at 1, and goes up to max_queries, using the query_interval
  791. self.query_intervals = []
  792. queries = 1
  793. while queries <= self.max_queries:
  794. self.query_intervals.append(queries)
  795. if queries == 1:
  796. queries = 0
  797. queries = queries + self.query_interval
  798. # Load the latest data
  799. #self.latest = None
  800. #try:
  801. # with open('toolset/benchmark/latest.json', 'r') as f:
  802. # # Load json file into config object
  803. # self.latest = json.load(f)
  804. # logging.info("toolset/benchmark/latest.json loaded to self.latest")
  805. # logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
  806. #except IOError:
  807. # logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
  808. #
  809. #self.results = None
  810. #try:
  811. # if self.latest != None and self.name in self.latest.keys():
  812. # with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
  813. # # Load json file into config object
  814. # self.results = json.load(f)
  815. #except IOError:
  816. # pass
  817. self.results = None
  818. try:
  819. with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
  820. #Load json file into results object
  821. self.results = json.load(f)
  822. except IOError:
  823. logging.warn("results.json for test %s not found.",self.name)
  824. if self.results == None:
  825. self.results = dict()
  826. self.results['name'] = self.name
  827. self.results['concurrencyLevels'] = self.concurrency_levels
  828. self.results['queryIntervals'] = self.query_intervals
  829. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  830. self.results['duration'] = self.duration
  831. self.results['rawData'] = dict()
  832. self.results['rawData']['json'] = dict()
  833. self.results['rawData']['db'] = dict()
  834. self.results['rawData']['query'] = dict()
  835. self.results['rawData']['fortune'] = dict()
  836. self.results['rawData']['update'] = dict()
  837. self.results['rawData']['plaintext'] = dict()
  838. self.results['completed'] = dict()
  839. self.results['succeeded'] = dict()
  840. self.results['succeeded']['json'] = []
  841. self.results['succeeded']['db'] = []
  842. self.results['succeeded']['query'] = []
  843. self.results['succeeded']['fortune'] = []
  844. self.results['succeeded']['update'] = []
  845. self.results['succeeded']['plaintext'] = []
  846. self.results['failed'] = dict()
  847. self.results['failed']['json'] = []
  848. self.results['failed']['db'] = []
  849. self.results['failed']['query'] = []
  850. self.results['failed']['fortune'] = []
  851. self.results['failed']['update'] = []
  852. self.results['failed']['plaintext'] = []
  853. self.results['warning'] = dict()
  854. self.results['warning']['json'] = []
  855. self.results['warning']['db'] = []
  856. self.results['warning']['query'] = []
  857. self.results['warning']['fortune'] = []
  858. self.results['warning']['update'] = []
  859. self.results['warning']['plaintext'] = []
  860. else:
  861. #for x in self.__gather_tests():
  862. # if x.name not in self.results['frameworks']:
  863. # self.results['frameworks'] = self.results['frameworks'] + [x.name]
  864. # Always overwrite framework list
  865. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  866. # Setup the ssh command string
  867. self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
  868. self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
  869. if self.database_identity_file != None:
  870. self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
  871. if self.client_identity_file != None:
  872. self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
  873. if self.install is not None:
  874. install = Installer(self, self.install_strategy)
  875. install.install_software()
  876. ############################################################
  877. # End __init__
  878. ############################################################