# benchmarker.py

from setup.linux.installer import Installer
from benchmark import framework_test

import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging

from multiprocessing import Process
from datetime import datetime


class Benchmarker:

    ##########################################################################################
    # Public methods
    ##########################################################################################

    ############################################################
    # Prints all the available tests
    ############################################################
    def run_list_tests(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            print test.name

        self.__finish()
    ############################################################
    # End run_list_tests
    ############################################################
    ############################################################
    # Prints the metadata for all the available tests
    ############################################################
    def run_list_test_metadata(self):
        all_tests = self.__gather_tests

        all_tests_json = json.dumps(map(lambda test: {
            "name": test.name,
            "approach": test.approach,
            "classification": test.classification,
            "database": test.database,
            "framework": test.framework,
            "language": test.language,
            "orm": test.orm,
            "platform": test.platform,
            "webserver": test.webserver,
            "os": test.os,
            "database_os": test.database_os,
            "display_name": test.display_name,
            "notes": test.notes,
            "versus": test.versus
        }, all_tests))

        with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
            f.write(all_tests_json)

        self.__finish()
    ############################################################
    # End run_list_test_metadata
    ############################################################
    ############################################################
    # parse_timestamp
    # Re-parses the raw data for a given timestamp
    ############################################################
    def parse_timestamp(self):
        all_tests = self.__gather_tests

        for test in all_tests:
            test.parse_all()

        self.__parse_results(all_tests)

        self.__finish()
    ############################################################
    # End parse_timestamp
    ############################################################
    ############################################################
    # Run the tests:
    # This process involves setting up the client/server machines
    # with any necessary changes. Then going through each test,
    # running their setup script, verifying the URLs, and
    # running benchmarks against them.
    ############################################################
    def run(self):
        ##########################
        # Get a list of all known
        # tests that we can run.
        ##########################
        all_tests = self.__gather_tests

        ##########################
        # Setup client/server
        ##########################
        print textwrap.dedent("""
            =====================================================
            Preparing Server, Database, and Client ...
            =====================================================
            """)
        self.__setup_server()
        self.__setup_database()
        self.__setup_client()

        ##########################
        # Run tests
        ##########################
        self.__run_tests(all_tests)

        ##########################
        # Parse results
        ##########################
        if self.mode == "benchmark":
            print textwrap.dedent("""
                =====================================================
                Parsing Results ...
                =====================================================
                """)
            self.__parse_results(all_tests)

        self.__finish()
    ############################################################
    # End run
    ############################################################
    ############################################################
    # database_sftp_string(batch_file)
    # Generates a fully qualified sftp command for the database machine
    ############################################################
    def database_sftp_string(self, batch_file):
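        # A minimal sketch of the resulting command (illustrative values, not
        # taken from any real configuration):
        #   sftp -oStrictHostKeyChecking=no  -b batch.txt  -i ~/.ssh/id_rsa  benchuser@db-host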
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file != None: sftp_string += " -b " + batch_file + " "

        if self.database_identity_file != None:
            sftp_string += " -i " + self.database_identity_file + " "

        return sftp_string + self.database_user + "@" + self.database_host
    ############################################################
    # End database_sftp_string
    ############################################################

    ############################################################
    # client_sftp_string(batch_file)
    # Generates a fully qualified sftp command for the client machine
    ############################################################
    def client_sftp_string(self, batch_file):
        sftp_string = "sftp -oStrictHostKeyChecking=no "
        if batch_file != None: sftp_string += " -b " + batch_file + " "

        if self.client_identity_file != None:
            sftp_string += " -i " + self.client_identity_file + " "

        return sftp_string + self.client_user + "@" + self.client_host
    ############################################################
    # End client_sftp_string
    ############################################################
    ############################################################
    # generate_url(url, port)
    # generates a fully qualified URL for accessing a test url
    ############################################################
    def generate_url(self, url, port):
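        # For example (illustrative values): generate_url("/json", 8080) returns
        # "<server_host>:8080/json".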
        return self.server_host + ":" + str(port) + url
    ############################################################
    # End generate_url
    ############################################################

    ############################################################
    # output_file(test_name, test_type)
    # returns the output file for this test_name and test_type
    # timestamp/test_type/test_name/raw
    ############################################################
    def output_file(self, test_name, test_type):
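        # Builds results/<self.name>/<timestamp>/<test_type>/<test_name>/raw and
        # creates the parent directories if they do not already exist.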
        path = os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            pass
        return path
    ############################################################
    # End output_file
    ############################################################

    ############################################################
    # full_results_directory
    ############################################################
    def full_results_directory(self):
        path = os.path.join(self.result_directory, self.timestamp)
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path
    ############################################################
    # End full_results_directory
    ############################################################
    ############################################################
    # Latest intermediate results directory
    ############################################################
    def latest_results_directory(self):
        path = os.path.join(self.result_directory, "latest")
        try:
            os.makedirs(path)
        except OSError:
            pass
        return path

    ############################################################
    # report_results
    ############################################################
    def report_results(self, framework, test, results):
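        # Stores the raw benchmark output for one framework under one test type,
        # e.g. (illustrative) self.results['rawData']['json']['some-framework'] = results.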
        if test not in self.results['rawData'].keys():
            self.results['rawData'][test] = dict()

        self.results['rawData'][test][framework.name] = results
    ############################################################
    # End report_results
    ############################################################

    ##########################################################################################
    # Private methods
    ##########################################################################################

    ############################################################
    # Gathers all the tests
    ############################################################
    @property
    def __gather_tests(self):
        tests = []
        # Loop through each directory (we assume we're being run from the benchmarking root)
        # and look for the files that signify a benchmark test
        for dirname, dirnames, filenames in os.walk('.'):
            # Look for the benchmark_config file, this will set up our tests.
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            if 'benchmark_config' in filenames:
                config = None
                config_file_name = os.path.join(dirname, 'benchmark_config')
                with open(config_file_name, 'r') as config_file:
                    # Load json file into config object
                    try:
                        config = json.load(config_file)
                    except:
                        print("Error loading '%s'." % config_file_name)
                        raise

                if config == None:
                    continue

                tests = tests + framework_test.parse_config(config, dirname[2:], self)

        tests.sort(key=lambda x: x.name)
        return tests
    ############################################################
    # End __gather_tests
    ############################################################
    ############################################################
    # Gathers all the frameworks
    ############################################################
    def __gather_frameworks(self):
        frameworks = []
        # Loop through each directory (we assume we're being run from the benchmarking root)
        for dirname, dirnames, filenames in os.walk('.'):
            # Look for the benchmark_config file, this will contain our framework name
            # Its format looks like this:
            #
            # {
            #   "framework": "nodejs",
            #   "tests": [{
            #     "default": {
            #       "setup_file": "setup",
            #       "json_url": "/json"
            #     },
            #     "mysql": {
            #       "setup_file": "setup",
            #       "db_url": "/mysql",
            #       "query_url": "/mysql?queries="
            #     },
            #     ...
            #   }]
            # }
            if 'benchmark_config' in filenames:
                config = None
                with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
                    # Load json file into config object
                    config = json.load(config_file)

                if config == None:
                    continue

                frameworks.append(str(config['framework']))

        return frameworks
    ############################################################
    # End __gather_frameworks
    ############################################################
    ############################################################
    # Makes any necessary changes to the server that should be
    # made before running the tests. This involves setting kernel
    # settings to allow for more connections, or more file
    # descriptors
    #
    # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
    ############################################################
    def __setup_server(self):
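        # Pins every CPU to the "performance" frequency governor and raises the
        # connection-backlog, file-descriptor, and shared-memory limits. Assumes
        # passwordless sudo on the server; on Windows ('nt') this step is skipped.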
        try:
            if os.name == 'nt':
                return True
            subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
            subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
            subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
            subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
            subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
            subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
        except subprocess.CalledProcessError:
            return False
    ############################################################
    # End __setup_server
    ############################################################

    ############################################################
    # Makes any necessary changes to the database machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include database specific
    # changes.
    ############################################################
    def __setup_database(self):
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
            sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
            sudo sysctl -w net.core.somaxconn=65535
            sudo -s ulimit -n 65535
            sudo sysctl net.ipv4.tcp_tw_reuse=1
            sudo sysctl net.ipv4.tcp_tw_recycle=1
            sudo sysctl -w kernel.shmmax=2147483648
            sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_database
    ############################################################
    ############################################################
    # Makes any necessary changes to the client machine that
    # should be made before running the tests. Is very similar
    # to the server setup, but may also include client specific
    # changes.
    ############################################################
    def __setup_client(self):
        p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
        p.communicate("""
            sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
            sudo sysctl -w net.core.somaxconn=65535
            sudo -s ulimit -n 65535
            sudo sysctl net.ipv4.tcp_tw_reuse=1
            sudo sysctl net.ipv4.tcp_tw_recycle=1
            sudo sysctl -w kernel.shmmax=2147483648
            sudo sysctl -w kernel.shmall=2097152
        """)
    ############################################################
    # End __setup_client
    ############################################################
    ############################################################
    # __run_tests
    #
    # 2013-10-02 ASB  Calls each test passed in tests to
    #                 __run_test in a separate process. Each
    #                 test is given a set amount of time, and if
    #                 it exceeds that time the child process (and
    #                 subsequently all of its child processes) is
    #                 killed. Uses the multiprocessing module.
    ############################################################
    def __run_tests(self, tests):
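        # On Linux each test runs in its own multiprocessing.Process so a hung
        # test can be terminated after run_test_timeout_seconds; the
        # __name__ == 'benchmark.benchmarker' check is a safety guard in case the
        # module is executed under a different name.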
        logging.debug("Start __run_tests.")
        logging.debug("__name__ = %s", __name__)

        if self.os.lower() == 'windows':
            logging.debug("Executing __run_tests on Windows")
            for test in tests:
                self.__run_test(test)
        else:
            logging.debug("Executing __run_tests on Linux")
            # These features do not work on Windows
            for test in tests:
                if __name__ == 'benchmark.benchmarker':
                    test_process = Process(target=self.__run_test, args=(test,))
                    test_process.start()
                    test_process.join(self.run_test_timeout_seconds)
                    if test_process.is_alive():
                        logging.debug("Child process for %s is still alive. Terminating.", test.name)
                        self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
                        test_process.terminate()

        logging.debug("End __run_tests.")
    ############################################################
    # End __run_tests
    ############################################################
    ############################################################
    # __run_test
    # 2013-10-02 ASB  Previously __run_tests. This code now only
    #                 processes a single test.
    #
    # Restarts the databases, runs the test's setup script,
    # verifies its URLs, benchmarks it (in benchmark mode),
    # stops it, and records intermediate results.
    ############################################################
    def __run_test(self, test):
        # If the user specified which tests to run, then
        # we can skip over tests that are not in that list
        if self.test != None and test.name not in self.test:
            return

        if hasattr(test, 'skip'):
            if test.skip.lower() == "true":
                logging.info("Test %s benchmark_config specifies to skip this test. Skipping.", test.name)
                return

        if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
            # the operating system requirements of this test for the
            # application server or the database server don't match
            # our current environment
            logging.info("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.")
            return

        # If the test is in the excludes list, we skip it
        if self.exclude != None and test.name in self.exclude:
            logging.info("Test %s has been added to the excludes list. Skipping.", test.name)
            return

        # If the test does not contain an implementation of the current test-type, skip it
        if self.type != 'all' and not test.contains_type(self.type):
            logging.info("Test type %s does not contain an implementation of the current test-type. Skipping", self.type)
            return

        logging.debug("test.os.lower() = %s  test.database_os.lower() = %s", test.os.lower(), test.database_os.lower())
        logging.debug("self.results['frameworks'] != None: " + str(self.results['frameworks'] != None))
        logging.debug("test.name: " + str(test.name))
        logging.debug("self.results['completed']: " + str(self.results['completed']))
        if self.results['frameworks'] != None and test.name in self.results['completed']:
            logging.info('Framework %s found in latest saved data. Skipping.', str(test.name))
            return

        print textwrap.dedent("""
            =====================================================
            Beginning {name}
            -----------------------------------------------------
            """.format(name=test.name))

        ##########################
        # Start this test
        ##########################
        print textwrap.dedent("""
            -----------------------------------------------------
            Starting {name}
            -----------------------------------------------------
            """.format(name=test.name))
        try:
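            # Restart the databases between tests so each framework starts from a
            # comparable state (assumes upstart-managed mysql/mongodb and an
            # init.d postgresql service on the database machine).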
            p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
            p.communicate("""
                sudo restart mysql
                sudo restart mongodb
                sudo /etc/init.d/postgresql restart
            """)
            time.sleep(10)

            result = test.start()
            if result != 0:
                test.stop()
                time.sleep(5)
                print "ERROR: Problem starting " + test.name
                print textwrap.dedent("""
                    -----------------------------------------------------
                    Stopped {name}
                    -----------------------------------------------------
                    """.format(name=test.name))
                self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
                return

            time.sleep(self.sleep)

            ##########################
            # Verify URLs
            ##########################
            print textwrap.dedent("""
                -----------------------------------------------------
                Verifying URLs for {name}
                -----------------------------------------------------
                """.format(name=test.name))
            test.verify_urls()

            ##########################
            # Benchmark this test
            ##########################
            if self.mode == "benchmark":
                print textwrap.dedent("""
                    -----------------------------------------------------
                    Benchmarking {name} ...
                    -----------------------------------------------------
                    """.format(name=test.name))
                test.benchmark()

            ##########################
            # Stop this test
            ##########################
            test.stop()
            time.sleep(5)

            print textwrap.dedent("""
                -----------------------------------------------------
                Stopped {name}
                -----------------------------------------------------
                """.format(name=test.name))
            time.sleep(5)

            ##########################################################
            # Save results thus far into toolset/benchmark/latest.json
            ##########################################################
            print textwrap.dedent("""
                ----------------------------------------------------
                Saving results through {name}
                ----------------------------------------------------
                """.format(name=test.name))
            self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))
        except (OSError, IOError, subprocess.CalledProcessError):
            self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
            print textwrap.dedent("""
                -----------------------------------------------------
                Subprocess Error {name}
                -----------------------------------------------------
                """.format(name=test.name))
            try:
                test.stop()
            except subprocess.CalledProcessError:
                self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
                print textwrap.dedent("""
                    -----------------------------------------------------
                    Subprocess Error: Test .stop() raised exception {name}
                    -----------------------------------------------------
                    """.format(name=test.name))
        except (KeyboardInterrupt, SystemExit):
            test.stop()
            print """
            -----------------------------------------------------
            Cleaning up....
            -----------------------------------------------------
            """
            self.__finish()
            sys.exit()
    ############################################################
    # End __run_test
    ############################################################
    ############################################################
    # __parse_results
    # Parses the raw benchmark data gathered so far: counts the
    # commits and lines of code for each framework, then writes
    # the aggregate results.json file.
    ############################################################
    def __parse_results(self, tests):
        # Run the method to get the commit count of each framework.
        self.__count_commits()
        # Call the method which counts the sloc for each framework
        self.__count_sloc()

        # Time to create parsed files
        # Aggregate JSON file
        with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
            f.write(json.dumps(self.results))
    ############################################################
    # End __parse_results
    ############################################################
    #############################################################
    # __count_sloc
    # This is assumed to be run from the benchmark root directory
    #############################################################
    def __count_sloc(self):
        all_frameworks = self.__gather_frameworks()
        jsonResult = {}

        for framework in all_frameworks:
            try:
                command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
                lineCount = subprocess.check_output(command, shell=True)
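                # e.g. (illustrative) "cloc --list-file=some-framework/source_code --yaml";
                # each framework is expected to provide a source_code file listing its files.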
                # Find the last instance of the word 'code' in the yaml output. This should
                # be the line count for the sum of all listed files or just the line count
                # for the last file in the case where there's only one file listed.
                lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
                lineCount = lineCount.strip('code: ')
                lineCount = lineCount[0:lineCount.rfind('comment')]
                jsonResult[framework['name']] = int(lineCount)
            except:
                continue

        self.results['rawData']['slocCounts'] = jsonResult
    ############################################################
    # End __count_sloc
    ############################################################

    ############################################################
    # __count_commits
    ############################################################
    def __count_commits(self):
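        # Counts the commits touching each framework's directory via
        # "git rev-list HEAD -- <framework>"; assumes the benchmarker is run
        # from the root of the git repository.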
        all_frameworks = self.__gather_frameworks()
        jsonResult = {}

        for framework in all_frameworks:
            try:
                command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
                commitCount = subprocess.check_output(command, shell=True)
                jsonResult[framework] = int(commitCount)
            except:
                continue

        self.results['rawData']['commitCounts'] = jsonResult
        self.commits = jsonResult
    ############################################################
    # End __count_commits
    ############################################################

    ############################################################
    # __write_intermediate_results
    ############################################################
    def __write_intermediate_results(self, test_name, status_message):
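        # Records a status message for this test under results['completed'] and
        # flushes the whole results dict to <latest_results_directory>/results.json
        # so a partial run can be resumed later.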
        try:
            self.results["completed"][test_name] = status_message
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
                f.write(json.dumps(self.results))
        except IOError:
            logging.error("Error writing results.json")
    ############################################################
    # End __write_intermediate_results
    ############################################################

    ############################################################
    # __finish
    ############################################################
    def __finish(self):
        print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
        print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
    ############################################################
    # End __finish
    ############################################################

    ##########################################################################################
    # Constructor
    ##########################################################################################

    ############################################################
    # Initialize the benchmarker. The args are the arguments
    # parsed via argparse.
    ############################################################
    def __init__(self, args):
        self.__dict__.update(args)
        self.start_time = time.time()
        self.run_test_timeout_seconds = 3600

        # setup logging
        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

        # setup some additional variables
        if self.database_user == None: self.database_user = self.client_user
        if self.database_host == None: self.database_host = self.client_host
        if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

        # setup results and latest_results directories
        self.result_directory = os.path.join("results", self.name)
        self.latest_results_directory = self.latest_results_directory()

        if self.parse != None:
            self.timestamp = self.parse
        else:
            self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

        # Setup the concurrency levels array. This array goes from
        # starting_concurrency to max_concurrency, doubling each time
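        # For example (illustrative): starting_concurrency=8, max_concurrency=256
        # produces [8, 16, 32, 64, 128, 256].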
        self.concurrency_levels = []
        concurrency = self.starting_concurrency
        while concurrency <= self.max_concurrency:
            self.concurrency_levels.append(concurrency)
            concurrency = concurrency * 2

        # Setup query interval array
        # starts at 1, and goes up to max_queries, using the query_interval
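        # For example (illustrative): query_interval=5, max_queries=20
        # produces [1, 5, 10, 15, 20].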
        self.query_intervals = []
        queries = 1
        while queries <= self.max_queries:
            self.query_intervals.append(queries)
            if queries == 1:
                queries = 0
            queries = queries + self.query_interval

        # Load the latest data
        #self.latest = None
        #try:
        #    with open('toolset/benchmark/latest.json', 'r') as f:
        #        # Load json file into config object
        #        self.latest = json.load(f)
        #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
        #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
        #except IOError:
        #    logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
        #
        #self.results = None
        #try:
        #    if self.latest != None and self.name in self.latest.keys():
        #        with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
        #            # Load json file into config object
        #            self.results = json.load(f)
        #except IOError:
        #    pass

        self.results = None
        try:
            with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
                # Load json file into results object
                self.results = json.load(f)
        except IOError:
            logging.warn("results.json for test %s not found.", self.name)

        if self.results == None:
            self.results = dict()
            self.results['name'] = self.name
            self.results['concurrencyLevels'] = self.concurrency_levels
            self.results['queryIntervals'] = self.query_intervals
            self.results['frameworks'] = [t.name for t in self.__gather_tests]
            self.results['duration'] = self.duration
            self.results['rawData'] = dict()
            self.results['rawData']['json'] = dict()
            self.results['rawData']['db'] = dict()
            self.results['rawData']['query'] = dict()
            self.results['rawData']['fortune'] = dict()
            self.results['rawData']['update'] = dict()
            self.results['rawData']['plaintext'] = dict()
            self.results['completed'] = dict()
        else:
            #for x in self.__gather_tests():
            #    if x.name not in self.results['frameworks']:
            #        self.results['frameworks'] = self.results['frameworks'] + [x.name]
            # Always overwrite framework list
            self.results['frameworks'] = [t.name for t in self.__gather_tests]

        # Setup the ssh command string
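        # A sketch of the resulting command (illustrative values):
        #   ssh -T -o StrictHostKeyChecking=no benchuser@10.0.0.2 -i /home/benchuser/.ssh/id_rsa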
        self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
        self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host

        if self.database_identity_file != None:
            self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file

        if self.client_identity_file != None:
            self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

        if self.install_software:
            install = Installer(self)
            install.install_software()
    ############################################################
    # End __init__
    ############################################################