benchmarker.py 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885
  1. from setup.linux.installer import Installer
  2. from benchmark import framework_test
  3. import os
  4. import json
  5. import subprocess
  6. import time
  7. import textwrap
  8. import pprint
  9. import csv
  10. import sys
  11. import logging
  12. import socket
  13. from multiprocessing import Process
  14. from datetime import datetime
  15. class Benchmarker:
  16. ##########################################################################################
  17. # Public methods
  18. ##########################################################################################
  19. ############################################################
  20. # Prints all the available tests
  21. ############################################################
  22. def run_list_tests(self):
  23. all_tests = self.__gather_tests
  24. for test in all_tests:
  25. print test.name
  26. self.__finish()
  27. ############################################################
  28. # End run_list_tests
  29. ############################################################
  30. ############################################################
  31. # Prints the metadata for all the available tests
  32. ############################################################
  33. def run_list_test_metadata(self):
  34. all_tests = self.__gather_tests
  35. all_tests_json = json.dumps(map(lambda test: {
  36. "name": test.name,
  37. "approach": test.approach,
  38. "classification": test.classification,
  39. "database": test.database,
  40. "framework": test.framework,
  41. "language": test.language,
  42. "orm": test.orm,
  43. "platform": test.platform,
  44. "webserver": test.webserver,
  45. "os": test.os,
  46. "database_os": test.database_os,
  47. "display_name": test.display_name,
  48. "notes": test.notes,
  49. "versus": test.versus
  50. }, all_tests))
  51. with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
  52. f.write(all_tests_json)
  53. self.__finish()
  54. ############################################################
  55. # End run_list_test_metadata
  56. ############################################################
  57. ############################################################
  58. # parse_timestamp
  59. # Re-parses the raw data for a given timestamp
  60. ############################################################
  61. def parse_timestamp(self):
  62. all_tests = self.__gather_tests
  63. for test in all_tests:
  64. test.parse_all()
  65. self.__parse_results(all_tests)
  66. self.__finish()
  67. ############################################################
  68. # End parse_timestamp
  69. ############################################################
  70. ############################################################
  71. # Run the tests:
  72. # This process involves setting up the client/server machines
  73. # with any necessary change. Then going through each test,
  74. # running their setup script, verifying the URLs, and
  75. # running benchmarks against them.
  76. ############################################################
  77. def run(self):
  78. ##########################
  79. # Get a list of all known
  80. # tests that we can run.
  81. ##########################
  82. all_tests = self.__gather_tests
  83. ##########################
  84. # Setup client/server
  85. ##########################
  86. print textwrap.dedent("""
  87. =====================================================
  88. Preparing Server, Database, and Client ...
  89. =====================================================
  90. """)
  91. self.__setup_server()
  92. self.__setup_database()
  93. self.__setup_client()
  94. ##########################
  95. # Run tests
  96. ##########################
  97. print textwrap.dedent("""
  98. =====================================================
  99. Running Tests ...
  100. =====================================================
  101. """)
  102. self.__run_tests(all_tests)
  103. ##########################
  104. # Parse results
  105. ##########################
  106. if self.mode == "benchmark":
  107. print textwrap.dedent("""
  108. =====================================================
  109. Parsing Results ...
  110. =====================================================
  111. """)
  112. self.__parse_results(all_tests)
  113. self.__finish()
  114. ############################################################
  115. # End run
  116. ############################################################
  117. ############################################################
  118. # database_sftp_string(batch_file)
  119. # generates a fully qualified URL for sftp to database
  120. ############################################################
  121. def database_sftp_string(self, batch_file):
  122. sftp_string = "sftp -oStrictHostKeyChecking=no "
  123. if batch_file != None: sftp_string += " -b " + batch_file + " "
  124. if self.database_identity_file != None:
  125. sftp_string += " -i " + self.database_identity_file + " "
  126. return sftp_string + self.database_user + "@" + self.database_host
  127. ############################################################
  128. # End database_sftp_string
  129. ############################################################
  130. ############################################################
  131. # client_sftp_string(batch_file)
  132. # generates a fully qualified URL for sftp to client
  133. ############################################################
  134. def client_sftp_string(self, batch_file):
  135. sftp_string = "sftp -oStrictHostKeyChecking=no "
  136. if batch_file != None: sftp_string += " -b " + batch_file + " "
  137. if self.client_identity_file != None:
  138. sftp_string += " -i " + self.client_identity_file + " "
  139. return sftp_string + self.client_user + "@" + self.client_host
  140. ############################################################
  141. # End client_sftp_string
  142. ############################################################
  143. ############################################################
  144. # generate_url(url, port)
  145. # generates a fully qualified URL for accessing a test url
  146. ############################################################
  147. def generate_url(self, url, port):
  148. return self.server_host + ":" + str(port) + url
  149. ############################################################
  150. # End generate_url
  151. ############################################################
  152. ############################################################
  153. # output_file(test_name, test_type)
  154. # returns the output file for this test_name and test_type
  155. # timestamp/test_type/test_name/raw
  156. ############################################################
  157. def output_file(self, test_name, test_type):
  158. path = os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  159. try:
  160. os.makedirs(os.path.dirname(path))
  161. except OSError:
  162. pass
  163. return path
  164. ############################################################
  165. # End output_file
  166. ############################################################
  167. ############################################################
  168. # full_results_directory
  169. ############################################################
  170. def full_results_directory(self):
  171. path = os.path.join(self.result_directory, self.timestamp)
  172. try:
  173. os.makedirs(path)
  174. except OSError:
  175. pass
  176. return path
  177. ############################################################
  178. # End output_file
  179. ############################################################
  180. ############################################################
  181. # Latest intermediate results dirctory
  182. ############################################################
  183. def latest_results_directory(self):
  184. path = os.path.join(self.result_directory,"latest")
  185. try:
  186. os.makedirs(path)
  187. except OSError:
  188. pass
  189. return path
  190. ############################################################
  191. # report_results
  192. ############################################################
  193. def report_results(self, framework, test, results):
  194. if test not in self.results['rawData'].keys():
  195. self.results['rawData'][test] = dict()
  196. self.results['rawData'][test][framework.name] = results
  197. ############################################################
  198. # End report_results
  199. ############################################################
  200. ##########################################################################################
  201. # Private methods
  202. ##########################################################################################
  203. ############################################################
  204. # Gathers all the tests
  205. ############################################################
  206. @property
  207. def __gather_tests(self):
  208. tests = []
  209. # Loop through each directory (we assume we're being run from the benchmarking root)
  210. # and look for the files that signify a benchmark test
  211. for dirname, dirnames, filenames in os.walk('.'):
  212. # Look for the benchmark_config file, this will set up our tests.
  213. # Its format looks like this:
  214. #
  215. # {
  216. # "framework": "nodejs",
  217. # "tests": [{
  218. # "default": {
  219. # "setup_file": "setup",
  220. # "json_url": "/json"
  221. # },
  222. # "mysql": {
  223. # "setup_file": "setup",
  224. # "db_url": "/mysql",
  225. # "query_url": "/mysql?queries="
  226. # },
  227. # ...
  228. # }]
  229. # }
  230. if 'benchmark_config' in filenames:
  231. config = None
  232. config_file_name = os.path.join(dirname, 'benchmark_config')
  233. with open(config_file_name, 'r') as config_file:
  234. # Load json file into config object
  235. try:
  236. config = json.load(config_file)
  237. except:
  238. print("Error loading '%s'." % config_file_name)
  239. raise
  240. if config == None:
  241. continue
  242. tests = tests + framework_test.parse_config(config, dirname[2:], self)
  243. tests.sort(key=lambda x: x.name)
  244. return tests
  245. ############################################################
  246. # End __gather_tests
  247. ############################################################
  248. ############################################################
  249. # Gathers all the frameworks
  250. ############################################################
  251. def __gather_frameworks(self):
  252. frameworks = []
  253. # Loop through each directory (we assume we're being run from the benchmarking root)
  254. for dirname, dirnames, filenames in os.walk('.'):
  255. # Look for the benchmark_config file, this will contain our framework name
  256. # It's format looks like this:
  257. #
  258. # {
  259. # "framework": "nodejs",
  260. # "tests": [{
  261. # "default": {
  262. # "setup_file": "setup",
  263. # "json_url": "/json"
  264. # },
  265. # "mysql": {
  266. # "setup_file": "setup",
  267. # "db_url": "/mysql",
  268. # "query_url": "/mysql?queries="
  269. # },
  270. # ...
  271. # }]
  272. # }
  273. if 'benchmark_config' in filenames:
  274. config = None
  275. with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
  276. # Load json file into config object
  277. config = json.load(config_file)
  278. if config == None:
  279. continue
  280. frameworks.append(str(config['framework']))
  281. return frameworks
  282. ############################################################
  283. # End __gather_frameworks
  284. ############################################################
  285. ############################################################
  286. # Makes any necessary changes to the server that should be
  287. # made before running the tests. This involves setting kernal
  288. # settings to allow for more connections, or more file
  289. # descriptiors
  290. #
  291. # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  292. ############################################################
  293. def __setup_server(self):
  294. try:
  295. if os.name == 'nt':
  296. return True
  297. subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
  298. subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
  299. subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
  300. subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
  301. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
  302. subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
  303. subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
  304. subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
  305. except subprocess.CalledProcessError:
  306. return False
  307. ############################################################
  308. # End __setup_server
  309. ############################################################
  310. ############################################################
  311. # Makes any necessary changes to the database machine that
  312. # should be made before running the tests. Is very similar
  313. # to the server setup, but may also include database specific
  314. # changes.
  315. ############################################################
    def __setup_database(self):
        """Tune the database machine before running tests.

        Opens an ssh session (command line in self.database_ssh_string) and
        feeds the kernel tweaks below through its stdin.  Mirrors the local
        tuning in __setup_server, with a larger shmmax for the database.
        """
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
  327. ############################################################
  328. # End __setup_database
  329. ############################################################
  330. ############################################################
  331. # Makes any necessary changes to the client machine that
  332. # should be made before running the tests. Is very similar
  333. # to the server setup, but may also include client specific
  334. # changes.
  335. ############################################################
    def __setup_client(self):
        """Tune the client (load-generator) machine before running tests.

        Opens an ssh session (command line in self.client_ssh_string) and
        feeds the kernel tweaks below through its stdin.  Mirrors the local
        tuning in __setup_server, with a larger shmmax.
        """
        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
          sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
          sudo sysctl -w net.core.somaxconn=65535
          sudo -s ulimit -n 65535
          sudo sysctl net.ipv4.tcp_tw_reuse=1
          sudo sysctl net.ipv4.tcp_tw_recycle=1
          sudo sysctl -w kernel.shmmax=2147483648
          sudo sysctl -w kernel.shmall=2097152
        """)
  347. ############################################################
  348. # End __setup_client
  349. ############################################################
  350. ############################################################
  351. # __run_tests
  352. #
  353. # 2013-10-02 ASB Calls each test passed in tests to
  354. # __run_test in a separate process. Each
  355. # test is given a set amount of time and if
  356. # kills the child process (and subsequently
  357. # all of its child processes). Uses
  358. # multiprocessing module.
  359. ############################################################
  360. def __run_tests(self, tests):
  361. logging.debug("Start __run_tests.")
  362. logging.debug("__name__ = %s",__name__)
  363. if self.os.lower() == 'windows':
  364. logging.debug("Executing __run_tests on Windows")
  365. for test in tests:
  366. self.__run_test(test)
  367. else:
  368. logging.debug("Executing __run_tests on Linux")
  369. # These features do not work on Windows
  370. for test in tests:
  371. if __name__ == 'benchmark.benchmarker':
  372. test_process = Process(target=self.__run_test, args=(test,))
  373. test_process.start()
  374. test_process.join(self.run_test_timeout_seconds)
  375. if(test_process.is_alive()):
  376. logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
  377. self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
  378. test_process.terminate()
  379. logging.debug("End __run_tests.")
  380. ############################################################
  381. # End __run_tests
  382. ############################################################
  383. ############################################################
  384. # __run_test
  385. # 2013-10-02 ASB Previously __run_tests. This code now only
  386. # processes a single test.
  387. #
  388. # Ensures that the system has all necessary software to run
  389. # the tests. This does not include that software for the individual
  390. # test, but covers software such as curl and weighttp that
  391. # are needed.
  392. ############################################################
    def __run_test(self, test):
        """Set up, verify, optionally benchmark, and stop a single test.

        The test is skipped when it is filtered out: not in the --test list,
        in the --exclude list, flagged "skip" in its benchmark_config, an
        OS/database-OS mismatch, no implementation of the requested
        test-type, or already present in the saved (completed) results.
        Progress is logged to per-test out/err files and intermediate
        results are persisted after each phase or failure.
        """
        try:
            # NOTE(review): only <latest>/logs is created here, yet the
            # open() calls below use logs/out and logs/err subdirectories --
            # confirm those are created elsewhere, otherwise the opens fail.
            # NOTE(review): latest_results_directory is referenced without
            # parentheses although it is defined above as a method --
            # presumably the constructor rebinds it to a path string; confirm.
            os.makedirs(os.path.join(self.latest_results_directory, 'logs'))
        except:
            pass
        with open(os.path.join(self.latest_results_directory, 'logs', 'out', "{name}.txt".format(name=test.name)), 'w') as out, \
                open(os.path.join(self.latest_results_directory, 'logs', 'err', "{name}.txt".format(name=test.name)), 'w') as err:
            # If the user specified which tests to run, then
            # we can skip over tests that are not in that list
            if self.test != None and test.name not in self.test:
                return
            # benchmark_config may flag a test as "skip": "true"
            if hasattr(test, 'skip'):
                if test.skip.lower() == "true":
                    out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
                    return
            if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
                # the operating system requirements of this test for the
                # application server or the database server don't match
                # our current environment
                out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
                return
            # If the test is in the excludes list, we skip it
            if self.exclude != None and test.name in self.exclude:
                out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
                return
            # If the test does not contain an implementation of the current test-type, skip it
            if self.type != 'all' and not test.contains_type(self.type):
                out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
                return
            # Debug breadcrumbs for the skip decisions above.
            out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
            out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
            out.write("test.name: {name}\n".format(name=str(test.name)))
            out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
            # Skip tests already recorded as completed in the saved data.
            if self.results['frameworks'] != None and test.name in self.results['completed']:
                out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
                return
            out.flush()
            out.write( textwrap.dedent("""
                =====================================================
                Beginning {name}
                -----------------------------------------------------
                """.format(name=test.name)) )
            out.flush()
            ##########################
            # Start this test
            ##########################
            out.write( textwrap.dedent("""
                -----------------------------------------------------
                Starting {name}
                -----------------------------------------------------
                """.format(name=test.name)) )
            out.flush()
            try:
                # Restart the database servers so every test starts from a
                # clean slate, then give them time to come back up.
                p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                p.communicate("""
                  sudo restart mysql
                  sudo restart mongodb
                  sudo /etc/init.d/postgresql restart
                """)
                time.sleep(10)
                # A bound port now means a previous test did not shut down.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    err.write( textwrap.dedent("""
                        ---------------------------------------------------------
                        Error: Port {port} is not available before start {name}
                        ---------------------------------------------------------
                        """.format(name=test.name, port=str(test.port))) )
                    out.flush()
                    return
                # Run the test's setup script; non-zero means it failed.
                result = test.start(out)
                if result != 0:
                    test.stop(out)
                    time.sleep(5)
                    err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                    err.write( textwrap.dedent("""
                        -----------------------------------------------------
                        Stopped {name}
                        -----------------------------------------------------
                        """.format(name=test.name)) )
                    err.flush()
                    self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                    return
                # Let the framework warm up before verification.
                time.sleep(self.sleep)
                ##########################
                # Verify URLs
                ##########################
                out.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Verifying URLs for {name}
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                test.verify_urls(out)
                out.flush()
                ##########################
                # Benchmark this test
                ##########################
                if self.mode == "benchmark":
                    out.write( textwrap.dedent("""
                        -----------------------------------------------------
                        Benchmarking {name} ...
                        -----------------------------------------------------
                        """.format(name=test.name)) )
                    out.flush()
                    test.benchmark(out)
                ##########################
                # Stop this test
                ##########################
                out.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Stopping {name}
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                out.flush()
                test.stop(out)
                time.sleep(5)
                # The port should now be free; if not, stop() failed.
                if self.__is_port_bound(test.port):
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                    err.write( textwrap.dedent("""
                        -----------------------------------------------------
                        Error: Port {port} was not released by stop {name}
                        -----------------------------------------------------
                        """.format(name=test.name, port=str(test.port))) )
                    out.flush()
                    return
                out.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Stopped {name}
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                out.flush()
                time.sleep(5)
                ##########################################################
                # Save results thus far into toolset/benchmark/latest.json
                ##########################################################
                out.write( textwrap.dedent("""
                    ----------------------------------------------------
                    Saving results through {name}
                    ----------------------------------------------------
                    """.format(name=test.name)) )
                out.flush()
                self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
            except (OSError, IOError, subprocess.CalledProcessError):
                # Setup/benchmark failed: record it and try to stop the test
                # so it does not hold its port for the next one.
                self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
                out.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Subprocess Error {name}
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                out.flush()
                try:
                    test.stop(out)
                except (subprocess.CalledProcessError):
                    self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                    out.write( textwrap.dedent("""
                        -----------------------------------------------------
                        Subprocess Error: Test .stop() raised exception {name}
                        -----------------------------------------------------
                        """.format(name=test.name)) )
                    out.flush()
            except (KeyboardInterrupt, SystemExit):
                # User abort: stop the test, flush logs, and exit cleanly.
                test.stop(out)
                out.write( """
                    -----------------------------------------------------
                    Cleaning up....
                    -----------------------------------------------------
                    """ )
                out.flush()
                out.close()
                self.__finish()
                sys.exit()
  563. ############################################################
  564. # End __run_tests
  565. ############################################################
  566. ############################################################
  567. # __is_port_bound
  568. # Check if the requested port is available. If it
  569. # isn't available, then a previous test probably didn't
  570. # shutdown properly.
  571. ############################################################
  572. def __is_port_bound(self, port):
  573. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  574. try:
  575. # Try to bind to all IP addresses, this port
  576. s.bind(("", port))
  577. # If we get here, we were able to bind successfully,
  578. # which means the port is free.
  579. except:
  580. # If we get an exception, it might be because the port is still bound
  581. # which would be bad, or maybe it is a privileged port (<1024) and we
  582. # are not running as root, or maybe the server is gone, but sockets are
  583. # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
  584. # connect.
  585. try:
  586. s.connect(("127.0.0.1", port))
  587. # If we get here, we were able to connect to something, which means
  588. # that the port is still bound.
  589. return True
  590. except:
  591. # An exception means that we couldn't connect, so a server probably
  592. # isn't still running on the port.
  593. pass
  594. finally:
  595. s.close()
  596. return False
  597. ############################################################
  598. # End __is_port_bound
  599. ############################################################
  600. ############################################################
  601. # __parse_results
  602. # Ensures that the system has all necessary software to run
  603. # the tests. This does not include that software for the individual
  604. # test, but covers software such as curl and weighttp that
  605. # are needed.
  606. ############################################################
  607. def __parse_results(self, tests):
  608. # Run the method to get the commmit count of each framework.
  609. self.__count_commits()
  610. # Call the method which counts the sloc for each framework
  611. self.__count_sloc()
  612. # Time to create parsed files
  613. # Aggregate JSON file
  614. with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
  615. f.write(json.dumps(self.results))
  616. ############################################################
  617. # End __parse_results
  618. ############################################################
  619. #############################################################
  620. # __count_sloc
  621. # This is assumed to be run from the benchmark root directory
  622. #############################################################
  623. def __count_sloc(self):
  624. all_frameworks = self.__gather_frameworks()
  625. jsonResult = {}
  626. for framework in all_frameworks:
  627. try:
  628. command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
  629. lineCount = subprocess.check_output(command, shell=True)
  630. # Find the last instance of the word 'code' in the yaml output. This should
  631. # be the line count for the sum of all listed files or just the line count
  632. # for the last file in the case where there's only one file listed.
  633. lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
  634. lineCount = lineCount.strip('code: ')
  635. lineCount = lineCount[0:lineCount.rfind('comment')]
  636. jsonResult[framework['name']] = int(lineCount)
  637. except:
  638. continue
  639. self.results['rawData']['slocCounts'] = jsonResult
  640. ############################################################
  641. # End __count_sloc
  642. ############################################################
  643. ############################################################
  644. # __count_commits
  645. ############################################################
  646. def __count_commits(self):
  647. all_frameworks = self.__gather_frameworks()
  648. jsonResult = {}
  649. for framework in all_frameworks:
  650. try:
  651. command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
  652. commitCount = subprocess.check_output(command, shell=True)
  653. jsonResult[framework] = int(commitCount)
  654. except:
  655. continue
  656. self.results['rawData']['commitCounts'] = jsonResult
  657. self.commits = jsonResult
  658. ############################################################
  659. # End __count_commits
  660. ############################################################
  661. ############################################################
  662. # __write_intermediate_results
  663. ############################################################
  664. def __write_intermediate_results(self,test_name,status_message):
  665. try:
  666. self.results["completed"][test_name] = status_message
  667. with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
  668. f.write(json.dumps(self.results))
  669. except (IOError):
  670. logging.error("Error writing results.json")
  671. ############################################################
  672. # End __write_intermediate_results
  673. ############################################################
  674. ############################################################
  675. # __finish
  676. ############################################################
  677. def __finish(self):
  678. print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
  679. print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  680. ############################################################
  681. # End __finish
  682. ############################################################
  683. ##########################################################################################
  684. # Constructor
  685. ##########################################################################################
  686. ############################################################
  687. # Initialize the benchmarker. The args are the arguments
  688. # parsed via argparser.
  689. ############################################################
  690. def __init__(self, args):
  691. self.__dict__.update(args)
  692. self.start_time = time.time()
  693. self.run_test_timeout_seconds = 3600
  694. # setup logging
  695. logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
  696. # setup some additional variables
  697. if self.database_user == None: self.database_user = self.client_user
  698. if self.database_host == None: self.database_host = self.client_host
  699. if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
  700. # setup results and latest_results directories
  701. self.result_directory = os.path.join("results", self.name)
  702. self.latest_results_directory = self.latest_results_directory()
  703. if self.parse != None:
  704. self.timestamp = self.parse
  705. else:
  706. self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
  707. # Setup the concurrency levels array. This array goes from
  708. # starting_concurrency to max concurrency, doubling each time
  709. self.concurrency_levels = []
  710. concurrency = self.starting_concurrency
  711. while concurrency <= self.max_concurrency:
  712. self.concurrency_levels.append(concurrency)
  713. concurrency = concurrency * 2
  714. # Setup query interval array
  715. # starts at 1, and goes up to max_queries, using the query_interval
  716. self.query_intervals = []
  717. queries = 1
  718. while queries <= self.max_queries:
  719. self.query_intervals.append(queries)
  720. if queries == 1:
  721. queries = 0
  722. queries = queries + self.query_interval
  723. # Load the latest data
  724. #self.latest = None
  725. #try:
  726. # with open('toolset/benchmark/latest.json', 'r') as f:
  727. # # Load json file into config object
  728. # self.latest = json.load(f)
  729. # logging.info("toolset/benchmark/latest.json loaded to self.latest")
  730. # logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
  731. #except IOError:
  732. # logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
  733. #
  734. #self.results = None
  735. #try:
  736. # if self.latest != None and self.name in self.latest.keys():
  737. # with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
  738. # # Load json file into config object
  739. # self.results = json.load(f)
  740. #except IOError:
  741. # pass
  742. self.results = None
  743. try:
  744. with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
  745. #Load json file into results object
  746. self.results = json.load(f)
  747. except IOError:
  748. logging.warn("results.json for test %s not found.",self.name)
  749. if self.results == None:
  750. self.results = dict()
  751. self.results['name'] = self.name
  752. self.results['concurrencyLevels'] = self.concurrency_levels
  753. self.results['queryIntervals'] = self.query_intervals
  754. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  755. self.results['duration'] = self.duration
  756. self.results['rawData'] = dict()
  757. self.results['rawData']['json'] = dict()
  758. self.results['rawData']['db'] = dict()
  759. self.results['rawData']['query'] = dict()
  760. self.results['rawData']['fortune'] = dict()
  761. self.results['rawData']['update'] = dict()
  762. self.results['rawData']['plaintext'] = dict()
  763. self.results['completed'] = dict()
  764. else:
  765. #for x in self.__gather_tests():
  766. # if x.name not in self.results['frameworks']:
  767. # self.results['frameworks'] = self.results['frameworks'] + [x.name]
  768. # Always overwrite framework list
  769. self.results['frameworks'] = [t.name for t in self.__gather_tests]
  770. # Setup the ssh command string
  771. self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
  772. self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
  773. if self.database_identity_file != None:
  774. self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
  775. if self.client_identity_file != None:
  776. self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
  777. if self.install_software:
  778. install = Installer(self)
  779. install.install_software()
  780. ############################################################
  781. # End __init__
  782. ############################################################