benchmarker.py

from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging
import socket
import glob
from multiprocessing import Process
from datetime import datetime

class Benchmarker:

  ##########################################################################################
  # Public methods
  ##########################################################################################

  ############################################################
  # Prints all the available tests
  ############################################################
  def run_list_tests(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      print test.name

    self.__finish()
  ############################################################
  # End run_list_tests
  ############################################################

  ############################################################
  # Prints the metadata for all the available tests
  ############################################################
  def run_list_test_metadata(self):
    all_tests = self.__gather_tests
    all_tests_json = json.dumps(map(lambda test: {
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    }, all_tests))

    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)

    self.__finish()
  ############################################################
  # End run_list_test_metadata
  ############################################################

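  # For orientation, a single entry in test_metadata.json ends up looking
  # roughly like the sketch below (values are hypothetical, not taken from a
  # real benchmark_config):
  #
  #   {"name": "nodejs-mysql", "approach": "Realistic", "classification": "Platform",
  #    "database": "MySQL", "framework": "nodejs", "language": "JavaScript",
  #    "orm": "Raw", "platform": "nodejs", "webserver": "None", "os": "Linux",
  #    "database_os": "Linux", "display_name": "nodejs", "notes": "", "versus": "nodejs"}
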
  ############################################################
  # parse_timestamp
  # Re-parses the raw data for a given timestamp
  ############################################################
  def parse_timestamp(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      test.parse_all()

    self.__parse_results(all_tests)

    self.__finish()
  ############################################################
  # End parse_timestamp
  ############################################################

  ############################################################
  # Run the tests:
  # This process involves setting up the client/server machines
  # with any necessary changes. Then it goes through each test,
  # running its setup script, verifying the URLs, and
  # running benchmarks against it.
  ############################################################
  def run(self):
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests

    ##########################
    # Setup client/server
    ##########################
    print textwrap.dedent("""
      =====================================================
      Preparing Server, Database, and Client ...
      =====================================================
      """)
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    ##########################
    # Run tests
    ##########################
    print textwrap.dedent("""
      =====================================================
      Running Tests ...
      =====================================================
      """)
    result = self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
      print textwrap.dedent("""
        =====================================================
        Parsing Results ...
        =====================================================
        """)
      self.__parse_results(all_tests)

    self.__finish()
    return result
  ############################################################
  # End run
  ############################################################

  ############################################################
  # database_sftp_string(batch_file)
  # Generates a fully qualified sftp command string for the
  # database machine
  ############################################################
  def database_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file != None: sftp_string += " -b " + batch_file + " "

    if self.database_identity_file != None:
      sftp_string += " -i " + self.database_identity_file + " "

    return sftp_string + self.database_user + "@" + self.database_host
  ############################################################
  # End database_sftp_string
  ############################################################

  ############################################################
  # client_sftp_string(batch_file)
  # Generates a fully qualified sftp command string for the
  # client machine
  ############################################################
  def client_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file != None: sftp_string += " -b " + batch_file + " "

    if self.client_identity_file != None:
      sftp_string += " -i " + self.client_identity_file + " "

    return sftp_string + self.client_user + "@" + self.client_host
  ############################################################
  # End client_sftp_string
  ############################################################

  ############################################################
  # generate_url(url, port)
  # generates a fully qualified URL for accessing a test url
  ############################################################
  def generate_url(self, url, port):
    return self.server_host + ":" + str(port) + url
  ############################################################
  # End generate_url
  ############################################################

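  # Illustration (host and port below are hypothetical): with self.server_host
  # set to "10.0.0.1", generate_url("/json", 8080) returns "10.0.0.1:8080/json".
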
  ############################################################
  # get_output_file(test_name, test_type)
  # Returns the output file name for this test_name and
  # test_type: timestamp/test_type/test_name/raw
  ############################################################
  def get_output_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  ############################################################
  # End get_output_file
  ############################################################

  ############################################################
  # output_file(test_name, test_type)
  # Returns the output file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/raw
  ############################################################
  def output_file(self, test_name, test_type):
    path = self.get_output_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End output_file
  ############################################################

  ############################################################
  # get_warning_file(test_name, test_type)
  # Returns the warning file name for this test_name and
  # test_type: timestamp/test_type/test_name/warn
  ############################################################
  def get_warning_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
  ############################################################
  # End get_warning_file
  ############################################################

  ############################################################
  # warning_file(test_name, test_type)
  # Returns the warning file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/warn
  ############################################################
  def warning_file(self, test_name, test_type):
    path = self.get_warning_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End warning_file
  ############################################################

  ############################################################
  # get_stats_file(test_name, test_type)
  # Returns the stats file name for this test_name and
  # test_type: timestamp/test_type/test_name/stats
  ############################################################
  def get_stats_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
  ############################################################
  # End get_stats_file
  ############################################################

  ############################################################
  # stats_file(test_name, test_type)
  # Returns the stats file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/stats
  ############################################################
  def stats_file(self, test_name, test_type):
    path = self.get_stats_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End stats_file
  ############################################################

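  # Taken together, the helpers above lay a run's files out on disk as
  # (test name and timestamp are illustrative):
  #
  #   results/<self.name>/20131002123456/json/gemini/raw
  #   results/<self.name>/20131002123456/json/gemini/warn
  #   results/<self.name>/20131002123456/json/gemini/stats
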
  ############################################################
  # full_results_directory
  ############################################################
  def full_results_directory(self):
    path = os.path.join(self.result_directory, self.timestamp)
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path
  ############################################################
  # End full_results_directory
  ############################################################

  ############################################################
  # Latest intermediate results directory
  ############################################################
  def latest_results_directory(self):
    path = os.path.join(self.result_directory, "latest")
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path

  ############################################################
  # report_results
  ############################################################
  def report_results(self, framework, test, results):
    if test not in self.results['rawData'].keys():
      self.results['rawData'][test] = dict()

    # If results has a size from the parse, then it succeeded.
    if results:
      self.results['rawData'][test][framework.name] = results

      # This may already be set for single-tests
      if framework.name not in self.results['succeeded'][test]:
        self.results['succeeded'][test].append(framework.name)

      # Record a warning for this test type if a warning file was written
      if (os.path.exists(self.get_warning_file(framework.name, test)) and
          framework.name not in self.results['warning'][test]):
        self.results['warning'][test].append(framework.name)
    else:
      # This may already be set for single-tests
      if framework.name not in self.results['failed'][test]:
        self.results['failed'][test].append(framework.name)
  ############################################################
  # End report_results
  ############################################################

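  # Sketch of how report_results shapes self.results (the framework name
  # "gemini" is hypothetical):
  #
  #   self.results['rawData']['json']['gemini'] = <parsed results>
  #   self.results['succeeded']['json'] = ['gemini', ...]
  #   self.results['warning']['json']   = [...]  # only if a warn file exists
  #   self.results['failed']['json']    = [...]  # when the parse produced nothing
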
  ##########################################################################################
  # Private methods
  ##########################################################################################

  ############################################################
  # Gathers all the tests
  ############################################################
  @property
  def __gather_tests(self):
    tests = []

    # Assume we are running from FrameworkBenchmarks
    config_files = glob.glob('*/benchmark_config')

    for config_file_name in config_files:
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      config = None

      with open(config_file_name, 'r') as config_file:
        # Load json file into config object
        try:
          config = json.load(config_file)
        except:
          print("Error loading '%s'." % config_file_name)
          raise

      if config is None:
        continue

      test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
      # If the user specified which tests to run, then
      # we can skip over tests that are not in that list
      if self.test == None:
        tests = tests + test
      else:
        for atest in test:
          if atest.name in self.test:
            tests.append(atest)

    tests.sort(key=lambda x: x.name)

    # If the tests have been interrupted somehow, then we want to resume them where we left
    # off, rather than starting from the beginning
    if os.path.isfile('current_benchmark.txt'):
      with open('current_benchmark.txt', 'r') as interrupted_benchmark:
        interrupt_bench = interrupted_benchmark.read()
        for index, atest in enumerate(tests):
          if atest.name == interrupt_bench:
            tests = tests[index:]
            break
    return tests
  ############################################################
  # End __gather_tests
  ############################################################

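  # Hedged note: for a benchmark_config like the nodejs example sketched above,
  # framework_test.parse_config is expected to yield one test object per entry
  # under "tests" (e.g. a "default" test and a "mysql" test); how those tests
  # are named is decided inside framework_test, not here.
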
  ############################################################
  # Gathers all the frameworks
  ############################################################
  def __gather_frameworks(self):
    frameworks = []
    # Loop through each directory (we assume we're being run from the benchmarking root)
    for dirname, dirnames, filenames in os.walk('.'):
      # Look for the benchmark_config file, this will contain our framework name
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      if 'benchmark_config' in filenames:
        config = None
        with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
          # Load json file into config object
          config = json.load(config_file)
        if config == None:
          continue
        frameworks.append(str(config['framework']))

    return frameworks
  ############################################################
  # End __gather_frameworks
  ############################################################

  ############################################################
  # Makes any necessary changes to the server that should be
  # made before running the tests. This involves setting kernel
  # settings to allow for more connections, or more file
  # descriptors
  #
  # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  ############################################################
  def __setup_server(self):
    try:
      if os.name == 'nt':
        return True
      subprocess.check_call(["sudo", "bash", "-c", "cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
      subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
      subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
      subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
    except subprocess.CalledProcessError:
      return False
  ############################################################
  # End __setup_server
  ############################################################

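  # For reference, the tunables applied above (and on the database/client
  # machines below) do roughly the following:
  #   * tcp_max_syn_backlog / somaxconn  -- allow much deeper listen backlogs
  #     under heavy connection churn
  #   * ulimit -n                        -- raise the open-file-descriptor
  #     limit (one descriptor per socket)
  #   * tcp_tw_reuse / tcp_tw_recycle    -- recycle TIME_WAIT sockets faster
  #   * kernel.shmmax / kernel.shmall    -- raise shared-memory limits, mainly
  #     relevant to database servers such as PostgreSQL
  # They are applied live with sysctl; the same settings could be persisted in
  # /etc/sysctl.conf if desired.
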
  ############################################################
  # Makes any necessary changes to the database machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include database specific
  # changes.
  ############################################################
  def __setup_database(self):
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_database
  ############################################################

  ############################################################
  # Makes any necessary changes to the client machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include client specific
  # changes.
  ############################################################
  def __setup_client(self):
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_client
  ############################################################

  ############################################################
  # __run_tests
  #
  # 2013-10-02 ASB  Calls each test passed in tests to
  #                 __run_test in a separate process.  Each
  #                 test is given a set amount of time, and if
  #                 it exceeds that time the parent kills the
  #                 child process (and subsequently all of its
  #                 child processes).  Uses the multiprocessing
  #                 module.
  ############################################################
  def __run_tests(self, tests):
    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s", __name__)

    error_happened = False
    if self.os.lower() == 'windows':
      logging.debug("Executing __run_tests on Windows")
      for test in tests:
        with open('current_benchmark.txt', 'w') as benchmark_resume_file:
          benchmark_resume_file.write(test.name)
        if self.__run_test(test) != 0:
          error_happened = True
    else:
      logging.debug("Executing __run_tests on Linux")
      # These features do not work on Windows
      for test in tests:
        if __name__ == 'benchmark.benchmarker':
          print textwrap.dedent("""
            -----------------------------------------------------
              Running Test: {name} ...
            -----------------------------------------------------
            """.format(name=test.name))
          with open('current_benchmark.txt', 'w') as benchmark_resume_file:
            benchmark_resume_file.write(test.name)
          test_process = Process(target=self.__run_test, args=(test,))
          test_process.start()
          test_process.join(self.run_test_timeout_seconds)
          self.__load_results()  # Load intermediate result from child process
          if(test_process.is_alive()):
            logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
            self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
            test_process.terminate()
          if test_process.exitcode != 0:
            error_happened = True
    os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")

    if error_happened:
      return 1
    return 0
  ############################################################
  # End __run_tests
  ############################################################

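  # The loop above uses the standard multiprocessing timeout idiom; a minimal
  # sketch, independent of this class (names are illustrative):
  #
  #   from multiprocessing import Process
  #   p = Process(target=work, args=(item,))
  #   p.start()
  #   p.join(timeout_seconds)   # returns when the child exits or the timeout elapses
  #   if p.is_alive():          # still running, so it hit the timeout
  #       p.terminate()
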
  ############################################################
  # __run_test
  # 2013-10-02 ASB  Previously __run_tests.  This code now only
  #                 processes a single test.
  #
  # Runs a single test: starts the test's framework, verifies
  # its URLs, optionally benchmarks it, stops it, and records
  # intermediate results along the way.
  ############################################################
  def __run_test(self, test):
    try:
      os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
    except:
      pass
    with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
         open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
      if hasattr(test, 'skip'):
        if test.skip.lower() == "true":
          out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
          return

      if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
        # the operating system requirements of this test for the
        # application server or the database server don't match
        # our current environment
        out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
        return 0

      # If the test is in the excludes list, we skip it
      if self.exclude != None and test.name in self.exclude:
        out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
        return 0

      # If the test does not contain an implementation of the current test-type, skip it
      if self.type != 'all' and not test.contains_type(self.type):
        out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
        return 0

      out.write("test.os.lower() = {os}  test.database_os.lower() = {dbos}\n".format(os=test.os.lower(), dbos=test.database_os.lower()))
      out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
      out.write("test.name: {name}\n".format(name=str(test.name)))
      out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
      if self.results['frameworks'] != None and test.name in self.results['completed']:
        out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
        return
      out.flush()

      out.write( textwrap.dedent("""
        =====================================================
          Beginning {name}
        -----------------------------------------------------
        """.format(name=test.name)) )
      out.flush()

      ##########################
      # Start this test
      ##########################
      out.write( textwrap.dedent("""
        -----------------------------------------------------
          Starting {name}
        -----------------------------------------------------
        """.format(name=test.name)) )
      out.flush()
      try:
        if test.requires_database():
          p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
          p.communicate("""
            sudo restart mysql
            sudo restart mongodb
            sudo service redis-server restart
            sudo /etc/init.d/postgresql restart
          """)
          time.sleep(10)

        if self.__is_port_bound(test.port):
          self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
          err.write( textwrap.dedent("""
            ---------------------------------------------------------
              Error: Port {port} is not available before start {name}
            ---------------------------------------------------------
            """.format(name=test.name, port=str(test.port))) )
          err.flush()
          return 1

        result = test.start(out, err)
        if result != 0:
          test.stop(out, err)
          time.sleep(5)
          err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
          err.write( textwrap.dedent("""
            -----------------------------------------------------
              Stopped {name}
            -----------------------------------------------------
            """.format(name=test.name)) )
          err.flush()
          self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
          return 1

        time.sleep(self.sleep)

        ##########################
        # Verify URLs
        ##########################
        passed_verify = test.verify_urls(out, err)
        out.flush()
        err.flush()

        ##########################
        # Benchmark this test
        ##########################
        if self.mode == "benchmark":
          out.write( textwrap.dedent("""
            -----------------------------------------------------
              Benchmarking {name} ...
            -----------------------------------------------------
            """.format(name=test.name)) )
          out.flush()
          test.benchmark(out, err)
          out.flush()
          err.flush()

        ##########################
        # Stop this test
        ##########################
        out.write( textwrap.dedent("""
          -----------------------------------------------------
            Stopping {name}
          -----------------------------------------------------
          """.format(name=test.name)) )
        out.flush()
        test.stop(out, err)
        out.flush()
        err.flush()
        time.sleep(5)

        if self.__is_port_bound(test.port):
          self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
          err.write( textwrap.dedent("""
            -----------------------------------------------------
              Error: Port {port} was not released by stop {name}
            -----------------------------------------------------
            """.format(name=test.name, port=str(test.port))) )
          err.flush()
          return

        out.write( textwrap.dedent("""
          -----------------------------------------------------
            Stopped {name}
          -----------------------------------------------------
          """.format(name=test.name)) )
        out.flush()
        time.sleep(5)

        ##########################################################
        # Save intermediate results thus far
        ##########################################################
        out.write( textwrap.dedent("""
          ----------------------------------------------------
          Saving results through {name}
          ----------------------------------------------------
          """.format(name=test.name)) )
        out.flush()
        self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))
      except (OSError, IOError, subprocess.CalledProcessError) as e:
        self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
        err.write( textwrap.dedent("""
          -----------------------------------------------------
            Subprocess Error {name}
          -----------------------------------------------------
          {err}
          {trace}
          """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
        err.flush()
        try:
          test.stop(out, err)
        except (subprocess.CalledProcessError) as e:
          self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
          err.write( textwrap.dedent("""
            -----------------------------------------------------
              Subprocess Error: Test .stop() raised exception {name}
            -----------------------------------------------------
            {err}
            {trace}
            """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
          err.flush()
        out.close()
        err.close()
        return 1
      except (KeyboardInterrupt, SystemExit) as e:
        test.stop(out, err)
        out.write( """
          -----------------------------------------------------
          Cleaning up....
          -----------------------------------------------------
          """)
        out.flush()
        self.__finish()
        sys.exit()

      out.close()
      err.close()
      return 0
  ############################################################
  # End __run_test
  ############################################################

  ############################################################
  # __is_port_bound
  # Check if the requested port is available. If it
  # isn't available, then a previous test probably didn't
  # shutdown properly.
  ############################################################
  def __is_port_bound(self, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      # Try to bind to all IP addresses, this port
      s.bind(("", port))
      # If we get here, we were able to bind successfully,
      # which means the port is free.
    except:
      # If we get an exception, it might be because the port is still bound
      # which would be bad, or maybe it is a privileged port (<1024) and we
      # are not running as root, or maybe the server is gone, but sockets are
      # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
      # connect.
      try:
        s.connect(("127.0.0.1", port))
        # If we get here, we were able to connect to something, which means
        # that the port is still bound.
        return True
      except:
        # An exception means that we couldn't connect, so a server probably
        # isn't still running on the port.
        pass
    finally:
      s.close()

    return False
  ############################################################
  # End __is_port_bound
  ############################################################

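  # Usage note (port is illustrative): __is_port_bound(8080) returns True only
  # when bind() fails *and* a connect() to 127.0.0.1:8080 succeeds, i.e.
  # something is still listening there; every other outcome is treated as the
  # port being free.
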
  ############################################################
  # __parse_results
  # Gathers commit and SLOC counts for each framework, then
  # writes the aggregated results for this run to results.json.
  ############################################################
  def __parse_results(self, tests):
    # Run the method to get the commit count of each framework.
    self.__count_commits()
    # Call the method which counts the sloc for each framework
    self.__count_sloc()

    # Time to create parsed files
    # Aggregate JSON file
    with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
      f.write(json.dumps(self.results))

  ############################################################
  # End __parse_results
  ############################################################

  #############################################################
  # __count_sloc
  # This is assumed to be run from the benchmark root directory
  #############################################################
  def __count_sloc(self):
    all_frameworks = self.__gather_frameworks()

    jsonResult = {}
    for framework in all_frameworks:
      try:
        # Each framework name from __gather_frameworks doubles as its
        # directory name, so point cloc at <framework>/source_code
        command = "cloc --list-file=" + framework + "/source_code --yaml"
        lineCount = subprocess.check_output(command, shell=True)
        # Find the last instance of the word 'code' in the yaml output. This should
        # be the line count for the sum of all listed files or just the line count
        # for the last file in the case where there's only one file listed.
        lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
        lineCount = lineCount.strip('code: ')
        lineCount = lineCount[0:lineCount.rfind('comment')]
        jsonResult[framework] = int(lineCount)
      except:
        continue
    self.results['rawData']['slocCounts'] = jsonResult
  ############################################################
  # End __count_sloc
  ############################################################

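  # For context: cloc's --yaml report ends with per-language and SUM blocks
  # containing lines such as "code: 123" (the number is illustrative). The
  # slicing above takes the text from the last 'code' label onward, strips the
  # label characters, trims anything after 'comment', and converts what
  # remains to an integer line count.
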
  ############################################################
  # __count_commits
  ############################################################
  def __count_commits(self):
    all_frameworks = self.__gather_frameworks()

    jsonResult = {}
    for framework in all_frameworks:
      try:
        command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
        commitCount = subprocess.check_output(command, shell=True)
        jsonResult[framework] = int(commitCount)
      except:
        continue
    self.results['rawData']['commitCounts'] = jsonResult
    self.commits = jsonResult
  ############################################################
  # End __count_commits
  ############################################################

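  # The shell pipeline lists every commit that touched the framework's
  # directory, de-duplicates the hashes, and counts them. For example
  # (framework name is illustrative):
  #
  #   git rev-list HEAD -- nodejs | sort -u | wc -l
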
  ############################################################
  # __write_intermediate_results
  ############################################################
  def __write_intermediate_results(self, test_name, status_message):
    try:
      self.results["completed"][test_name] = status_message
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
        f.write(json.dumps(self.results))
    except (IOError):
      logging.error("Error writing results.json")

  ############################################################
  # End __write_intermediate_results
  ############################################################

  def __load_results(self):
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
        self.results = json.load(f)
    except (ValueError, IOError):
      pass

  ############################################################
  # __finish
  ############################################################
  def __finish(self):
    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)

  ############################################################
  # End __finish
  ############################################################

  ##########################################################################################
  # Constructor
  ##########################################################################################

  ############################################################
  # Initialize the benchmarker. The args are the arguments
  # parsed via argparse.
  ############################################################
  def __init__(self, args):
    self.__dict__.update(args)
    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

    # setup some additional variables
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()

    if self.parse != None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max_concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
      self.concurrency_levels.append(concurrency)
      concurrency = concurrency * 2
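    # Illustration (values are hypothetical): with starting_concurrency=8 and
    # max_concurrency=256 this yields [8, 16, 32, 64, 128, 256].
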
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
      self.query_intervals.append(queries)
      if queries == 1:
        queries = 0
      queries = queries + self.query_interval
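    # Illustration (values are hypothetical): with query_interval=5 and
    # max_queries=20 this yields [1, 5, 10, 15, 20]; the special-casing of
    # queries == 1 keeps the first level at 1 while later levels land on
    # multiples of the interval.
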
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass

    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        # Load json file into results object
        self.results = json.load(f)
    except IOError:
      logging.warn("results.json for test %s not found.", self.name)

    if self.results == None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_intervals
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['warning'] = dict()
      self.results['warning']['json'] = []
      self.results['warning']['db'] = []
      self.results['warning']['query'] = []
      self.results['warning']['fortune'] = []
      self.results['warning']['update'] = []
      self.results['warning']['plaintext'] = []
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
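    # Illustration (user, host, and key path are hypothetical): with
    # database_user="tfb", database_host="10.0.0.2" and an identity file of
    # "/home/tfb/.ssh/id_rsa", database_ssh_string becomes
    #   ssh -T -o StrictHostKeyChecking=no tfb@10.0.0.2 -i /home/tfb/.ssh/id_rsa
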
    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()

  ############################################################
  # End __init__
  ############################################################