# benchmarker.py

from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from utils import WrapLogger
from utils import Header

import os
import json
import subprocess
import time
import pprint
import csv
import sys
import logging
import socket
import glob
import signal

from multiprocessing import Process
from datetime import datetime

log = logging.getLogger('benchmarker')


class Benchmarker:
  ##########################################################################################
  # Public methods
  ##########################################################################################

  ############################################################
  # Prints all the available tests
  ############################################################
  def run_list_tests(self):
    all_tests = self.__gather_tests

    for test in all_tests:
      print test.name

    self.__finish()
  ############################################################
  # End run_list_tests
  ############################################################
  ############################################################
  # Prints the metadata for all the available tests
  ############################################################
  def run_list_test_metadata(self):
    all_tests = self.__gather_tests

    all_tests_json = json.dumps(map(lambda test: {
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    }, all_tests))

    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)

    self.__finish()
  ############################################################
  # End run_list_test_metadata
  ############################################################
  ############################################################
  # parse_timestamp
  # Re-parses the raw data for a given timestamp
  ############################################################
  def parse_timestamp(self):
    log.info("parse_timestamp")
    all_tests = self.__gather_tests

    for test in all_tests:
      test.parse_all()

    self.__parse_results(all_tests)

    self.__finish()
  ############################################################
  # End parse_timestamp
  ############################################################
  ############################################################
  # Run the tests:
  # This process involves setting up the client/server machines
  # with any necessary changes, then going through each test,
  # running its setup script, verifying the URLs, and
  # running benchmarks against it.
  ############################################################
  def run(self):
    log.info("run")
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests

    ##########################
    # Setup client/server
    ##########################
    print Header("Preparing Server, Database, and Client ...", top='=', bottom='=')
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()

    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")

    ##########################
    # Run tests
    ##########################
    print Header("Running Tests...", top='=', bottom='=')
    self.__run_tests(all_tests)

    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
      print Header("Parsing Results ...", top='=', bottom='=')
      self.__parse_results(all_tests)

    self.__finish()
  ############################################################
  # End run
  ############################################################
  ############################################################
  # database_sftp_string(batch_file)
  # Generates the sftp command string used to connect to the
  # database machine.
  ############################################################
  def database_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file is not None:
      sftp_string += " -b " + batch_file + " "

    if self.database_identity_file is not None:
      sftp_string += " -i " + self.database_identity_file + " "

    return sftp_string + self.database_user + "@" + self.database_host
  ############################################################
  # End database_sftp_string
  ############################################################
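  # For illustration (all values assumed): with database_user "ubuntu",
  # database_host "10.0.0.2", an identity file, and a batch file, the method
  # above returns a command string roughly like:
  #   sftp -oStrictHostKeyChecking=no -b commands.batch -i ~/.ssh/id_rsa ubuntu@10.0.0.2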
  ############################################################
  # client_sftp_string(batch_file)
  # Generates the sftp command string used to connect to the
  # client machine.
  ############################################################
  def client_sftp_string(self, batch_file):
    sftp_string = "sftp -oStrictHostKeyChecking=no "
    if batch_file is not None:
      sftp_string += " -b " + batch_file + " "

    if self.client_identity_file is not None:
      sftp_string += " -i " + self.client_identity_file + " "

    return sftp_string + self.client_user + "@" + self.client_host
  ############################################################
  # End client_sftp_string
  ############################################################
  ############################################################
  # generate_url(url, port)
  # generates a fully qualified URL for accessing a test url
  ############################################################
  def generate_url(self, url, port):
    return self.server_host + ":" + str(port) + url
  ############################################################
  # End generate_url
  ############################################################
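  # For illustration (host value assumed): with server_host "10.0.0.1",
  # generate_url("/json", 8080) returns "10.0.0.1:8080/json".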
  ############################################################
  # get_output_file(test_name, test_type)
  # Returns the output file name for this test_name and
  # test_type: timestamp/test_type/test_name/raw
  ############################################################
  def get_output_file(self, test_name, test_type):
    log.debug("get_output_file")
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
  ############################################################
  # End get_output_file
  ############################################################
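  # For illustration (values assumed): with self.name "my-run" and timestamp
  # "20131002123456", get_output_file("nodejs", "json") returns
  # "results/my-run/20131002123456/json/nodejs/raw".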
  ############################################################
  # output_file(test_name, test_type)
  # Returns the output file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/raw
  ############################################################
  def output_file(self, test_name, test_type):
    log.debug("output_file")
    path = self.get_output_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End output_file
  ############################################################
  ############################################################
  # get_warning_file(test_name, test_type)
  # Returns the warning file name for this test_name and
  # test_type: timestamp/test_type/test_name/warn
  ############################################################
  def get_warning_file(self, test_name, test_type):
    return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
  ############################################################
  # End get_warning_file
  ############################################################
  ############################################################
  # warning_file(test_name, test_type)
  # Returns the warning file for this test_name and test_type,
  # creating the parent directory if needed:
  # timestamp/test_type/test_name/warn
  ############################################################
  def warning_file(self, test_name, test_type):
    path = self.get_warning_file(test_name, test_type)
    try:
      os.makedirs(os.path.dirname(path))
    except OSError:
      pass
    return path
  ############################################################
  # End warning_file
  ############################################################
  ############################################################
  # full_results_directory
  ############################################################
  def full_results_directory(self):
    log.debug("full_results_directory")
    path = os.path.join(self.result_directory, self.timestamp)
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path
  ############################################################
  # End full_results_directory
  ############################################################
  ############################################################
  # Latest intermediate results directory
  ############################################################
  def latest_results_directory(self):
    log.debug("latest_results_directory")
    path = os.path.join(self.result_directory, "latest")
    try:
      os.makedirs(path)
    except OSError:
      pass
    return path

  ############################################################
  # report_results
  ############################################################
  def report_results(self, framework, test, results):
    log.debug("report_results: %s - %s" % (framework.name, test))
    log.debug("report_results: %s" % results)

    if test not in self.results['rawData'].keys():
      self.results['rawData'][test] = dict()

    # If results has a size from the parse, then it succeeded.
    if results:
      self.results['rawData'][test][framework.name] = results

      # This may already be set for single-tests
      if framework.name not in self.results['succeeded'][test]:
        self.results['succeeded'][test].append(framework.name)

      # Record a warning for this test type if a warning file was written
      if (os.path.exists(self.get_warning_file(framework.name, test)) and
          framework.name not in self.results['warning'][test]):
        self.results['warning'][test].append(framework.name)
    else:
      # This may already be set for single-tests
      if framework.name not in self.results['failed'][test]:
        self.results['failed'][test].append(framework.name)
  ############################################################
  # End report_results
  ############################################################
  ##########################################################################################
  # Private methods
  ##########################################################################################

  ############################################################
  # Gathers all the tests
  ############################################################
  @property
  def __gather_tests(self):
    log.info("__gather_tests")
    tests = []

    # Assume we are running from FrameworkBenchmarks
    config_files = glob.glob('*/benchmark_config')

    for config_file_name in config_files:
      # Look for the benchmark_config file; this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      config = None
      with open(config_file_name, 'r') as config_file:
        # Load json file into config object
        try:
          config = json.load(config_file)
        except:
          log.error("Error loading '%s'" % config_file_name)
          raise

      if config is None:
        continue

      test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
      # If the user specified which tests to run, then
      # we can skip over tests that are not in that list
      if self.test is None:
        tests = tests + test
      else:
        for atest in test:
          if atest.name in self.test:
            tests.append(atest)

    tests.sort(key=lambda x: x.name)
    return tests
  ############################################################
  # End __gather_tests
  ############################################################
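  # For illustration: the nodejs benchmark_config sketched above would typically
  # yield one FrameworkTest per entry under "tests" (a default test plus a mysql
  # test). The exact test names come from framework_test.parse_config, not from
  # this method; __gather_tests only filters by self.test and sorts by name.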
  ############################################################
  # Gathers all the frameworks
  ############################################################
  def __gather_frameworks(self):
    log.info("__gather_frameworks")
    frameworks = []
    # Loop through each directory (we assume we're being run from the benchmarking root)
    for dirname, dirnames, filenames in os.walk('.'):
      # Look for the benchmark_config file; this will contain our framework name.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      if 'benchmark_config' in filenames:
        config = None
        with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
          # Load json file into config object
          config = json.load(config_file)
        if config is None:
          continue
        frameworks.append(str(config['framework']))

    return frameworks
  ############################################################
  # End __gather_frameworks
  ############################################################
  ############################################################
  # Makes any necessary changes to the server that should be
  # made before running the tests. This involves setting kernel
  # settings to allow for more connections, or more file
  # descriptors
  #
  # http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
  ############################################################
  def __setup_server(self):
    log.info("__setup_server")
    try:
      if os.name == 'nt':
        return True
      (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
      def check(command):
        # shell=True expects the command as a single string
        subprocess.check_call(command, shell=True, stdout=out, stderr=err)
      check('sudo bash -c "cd /sys/devices/system/cpu; ls -d cpu[0-9]* | while read x; do echo performance > $x/cpufreq/scaling_governor; done"')
      check("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535")
      check("sudo sysctl -w net.core.somaxconn=65535")
      check("sudo -s ulimit -n 65535")
      check("sudo sysctl net.ipv4.tcp_tw_reuse=1")
      check("sudo sysctl net.ipv4.tcp_tw_recycle=1")
      check("sudo sysctl -w kernel.shmmax=134217728")
      check("sudo sysctl -w kernel.shmall=2097152")
    except subprocess.CalledProcessError:
      return False
  ############################################################
  # End __setup_server
  ############################################################
  ############################################################
  # Makes any necessary changes to the database machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include database specific
  # changes.
  ############################################################
  def __setup_database(self):
    log.info("__setup_database")
    (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_database
  ############################################################
  ############################################################
  # Makes any necessary changes to the client machine that
  # should be made before running the tests. Is very similar
  # to the server setup, but may also include client specific
  # changes.
  ############################################################
  def __setup_client(self):
    log.info("__setup_client")
    (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
    p.communicate("""
      sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
      sudo sysctl -w net.core.somaxconn=65535
      sudo -s ulimit -n 65535
      sudo sysctl net.ipv4.tcp_tw_reuse=1
      sudo sysctl net.ipv4.tcp_tw_recycle=1
      sudo sysctl -w kernel.shmmax=2147483648
      sudo sysctl -w kernel.shmall=2097152
    """)
  ############################################################
  # End __setup_client
  ############################################################
  ############################################################
  # __run_tests
  #
  # 2013-10-02 ASB  Calls each test passed in tests to
  #                 __run_test in a separate process. Each
  #                 test is given a set amount of time, and if
  #                 it runs longer than that the benchmarker
  #                 kills the child process (and subsequently
  #                 all of its child processes). Uses the
  #                 multiprocessing module.
  ############################################################
  def __run_tests(self, tests):
    log.info("__run_tests")
    log.debug("__run_tests with __name__ = %s", __name__)
    error_happened = False
    if self.os.lower() == 'windows':
      log.info("Executing __run_tests on Windows")
      for test in tests:
        if self.__run_test(test) != 0:
          error_happened = True
    else:
      log.info("Executing __run_tests on Linux")
      for test in tests:
        if __name__ == 'benchmark.benchmarker':
          print Header("Running Test: %s" % test.name)
          test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
          test_process.start()
          try:
            test_process.join(self.run_test_timeout_seconds)
          except KeyboardInterrupt:
            # Politely SIGTERM everyone in the session
            subprocess.call("sudo pkill -s %s" % test_process.pid, shell=True)
            time.sleep(5)
          self.__load_results()  # Load intermediate result from child process
          if test_process.is_alive():
            log.warn("Child process for {name} is still alive. Terminating.".format(name=test.name))
            self.__write_intermediate_results(test.name, "__run_test timeout (=" + str(self.run_test_timeout_seconds) + " seconds)")
            # Politely SIGTERM everyone in the original process group
            try:
              print "SIGTERM TO PID %s" % test_process.pid
              os.killpg(test_process.pid, signal.SIGTERM)
            except OSError:
              print "Failed"
            # Politely SIGTERM everyone in the session (this takes care
            # of most servers that start up as long-running daemons)
            subprocess.call("sudo pkill -s %s" % test_process.pid, shell=True)
            # Some time to close up shop
            time.sleep(3)
            # SIGKILL any stragglers
            try:
              print "SIGKILL TO PID %s" % test_process.pid
              os.killpg(test_process.pid, signal.SIGKILL)
            except OSError:
              print "Failed"
            subprocess.call("sudo pkill -SIGKILL -s %s" % test_process.pid, shell=True)
          log.handlers = []  # Clean up handlers left by __run_test
          if test_process.exitcode != 0:
            error_happened = True
    log.info("End __run_tests")
    if error_happened:
      return 1
    return 0
  ############################################################
  # End __run_tests
  ############################################################
  ############################################################
  # __run_test
  #
  # Checks test prerequisites, makes calls into framework_test
  # to orchestrate benchmarking this test, and stores results
  ############################################################
  def __run_test(self, test):
    log.info("__run_test")

    if self.os.lower() != 'windows':
      # Set our process ID to be the current session ID
      os.setsid()

    self.sigterm_has_fired = False
    def signal_term_handler(signal, frame):
      if self.sigterm_has_fired:
        print "SIGTERM fired before, ignoring"
        return 0
      else:
        print "SIGTERM fired"
        self.sigterm_has_fired = True
        log.info(Header("Cleaning up..."))
        test.stop(log)
        return 0
    signal.signal(signal.SIGTERM, signal_term_handler)

    logfile = os.path.join(self.latest_results_directory, 'logs', test.name, 'output.log')
    try:
      os.makedirs(os.path.dirname(logfile))
    except OSError:
      pass

    # Create handler for file logging
    logHandler = logging.FileHandler(logfile, mode='w')
    f = logging.Formatter("%(asctime)s: %(name)-12s: %(levelname)-8s %(message)s")
    logHandler.setFormatter(f)
    logHandler.setLevel(logging.DEBUG)
    log.addHandler(logHandler)

    # Ensure messages of level INFO will be sent to stdout.
    # This is a bit tricky, because we pass our logger to
    # framework_test, so this means anything logged to it
    # at INFO or greater is always shown in stdout. This is
    # good for some things (e.g. validate) and bad for others
    # (e.g. "in function foo"). Ensure you use logger vs log
    # in framework_test properly
    rootStreamHandler = logging.getLogger().handlers[0]
    if rootStreamHandler.level > logging.INFO:
      #class FileFilter(logging.Filter):
      #  def filter(self, record):
      #    if record.filename == "benchmarker.py":
      #      return True
      #    return False
      streamHandler = logging.StreamHandler()
      streamHandler.setFormatter(logging.Formatter("%(message)s"))
      streamHandler.setLevel(logging.INFO)
      # streamHandler.addFilter(FileFilter())
      log.addHandler(streamHandler)

    if hasattr(test, 'skip') and test.skip.lower() == "true":
      log.info("Skipping %s: benchmark_config specifies to skip this test", test.name)
      return

    if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
      log.info("Skipping %s: OS or Database OS specified in benchmark_config does not match the current environment", test.name)
      return

    if self.exclude is not None and test.name in self.exclude:
      log.info("Excluding %s: Added to the excludes list", test.name)
      return

    if self.type != 'all' and not test.contains_type(self.type):
      log.info("Skipping %s: Does not contain test for %s", test.name, self.type)
      return

    log.debug("test.os.lower() = %s  test.database_os.lower() = %s", test.os.lower(), test.database_os.lower())
    log.debug("self.results['frameworks'] != None: %s", self.results['frameworks'] != None)
    log.debug("test.name: %s", test.name)
    log.debug("self.results['completed']: %s", self.results['completed'])
    #if self.results['frameworks'] != None and test.name in self.results['completed']:
    #  log.debug("Skipping %s: Found in latest saved data", test.name)
    #  return

    log.info(Header("Beginning %s" % test.name, top='='))

    # Start this test
    log.info(Header("Starting %s" % test.name))
    try:
      if test.requires_database():
        (out, err) = WrapLogger(log, logging.DEBUG), WrapLogger(log, logging.ERROR)
        p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=out, stderr=err)
        log.debug("Restarting database")
        p.communicate("""
          sudo restart mysql
          sudo restart mongodb
          sudo /etc/init.d/postgresql restart
        """)
        time.sleep(10)

      if self.__is_port_bound(test.port):
        self.__write_intermediate_results(test.name, "port %s is not available before start" % test.port)
        log.error(Header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
        return 1

      result = test.start(log)
      if result != 0:
        test.stop(log)
        time.sleep(5)
        log.error("ERROR: Problem starting %s", test.name)
        log.error(Header("Stopped %s" % test.name))
        self.__write_intermediate_results(test.name, "<setup.py>#start() returned non-zero")
        return 1

      log.info("Sleeping for %s", self.sleep)
      time.sleep(self.sleep)

      # Verify URLs
      test.verify_urls(log)

      # Benchmark
      if self.mode == "benchmark":
        log.info(Header("Benchmarking %s" % test.name))
        test.benchmark(log)

      # Stop this test
      if self.os.lower() == 'windows' or not self.sigterm_has_fired:
        log.info(Header("Stopping %s" % test.name))
        test.stop(log)
        time.sleep(5)

      if self.__is_port_bound(test.port):
        self.__write_intermediate_results(test.name, "port %s was not released by stop" % test.port)
        log.error(Header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
        return 1

      log.info(Header("Stopped %s" % test.name))
      time.sleep(5)

      ##########################################################
      # Save results thus far into toolset/benchmark/latest.json
      ##########################################################
      log.info(Header("Saving results through %s" % test.name))
      self.__write_intermediate_results(test.name, time.strftime("%Y%m%d%H%M%S", time.localtime()))
    except (OSError, IOError, subprocess.CalledProcessError) as e:
      self.__write_intermediate_results(test.name, "<setup.py> raised an exception")
      log.error(Header("Subprocess Error %s" % test.name))
      log.error("%s", e)
      log.error("%s", sys.exc_info()[:2])
      log.debug("Subprocess Error Details", exc_info=True)
      try:
        test.stop(log)
      except subprocess.CalledProcessError as e:
        self.__write_intermediate_results(test.name, "<setup.py>#stop() raised an error")
        log.error(Header("Subprocess Error: Test .stop() raised exception %s" % test.name))
        log.error("%s", e)
        log.error("%s", sys.exc_info()[:2])
        log.debug("Subprocess Error Details", exc_info=True)
      return 1
  ############################################################
  # End __run_test
  ############################################################
  ############################################################
  # __is_port_bound
  # Check if the requested port is available. If it
  # isn't available, then a previous test probably didn't
  # shutdown properly.
  ############################################################
  def __is_port_bound(self, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      # Try to bind to all IP addresses, this port
      s.bind(("", port))
      # If we get here, we were able to bind successfully,
      # which means the port is free.
    except:
      # If we get an exception, it might be because the port is still bound
      # which would be bad, or maybe it is a privileged port (<1024) and we
      # are not running as root, or maybe the server is gone, but sockets are
      # still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
      # connect.
      try:
        s.connect(("127.0.0.1", port))
        # If we get here, we were able to connect to something, which means
        # that the port is still bound.
        return True
      except:
        # An exception means that we couldn't connect, so a server probably
        # isn't still running on the port.
        pass
    finally:
      s.close()
    return False
  ############################################################
  # End __is_port_bound
  ############################################################
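  # For illustration (port value assumed): if a previous test left a server
  # listening on port 8080, __is_port_bound(8080) returns True because the
  # bind fails and the follow-up connect succeeds; on a free port the bind
  # succeeds and the method falls through to return False.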
  ############################################################
  # __parse_results
  # Parses the raw data gathered for each test and writes the
  # aggregate results.json file for this timestamp.
  ############################################################
  def __parse_results(self, tests):
    log.info("__parse_results")
    # Run the method to get the commit count of each framework.
    self.__count_commits()
    # Call the method which counts the sloc for each framework
    self.__count_sloc()

    # Time to create parsed files
    # Aggregate JSON file
    results_file = os.path.join(self.full_results_directory(), "results.json")
    log.debug("Writing results to %s" % results_file)
    log.debug("Results: %s" % json.dumps(self.results))
    with open(results_file, "w") as f:
      f.write(json.dumps(self.results))
  ############################################################
  # End __parse_results
  ############################################################
  #############################################################
  # __count_sloc
  # This is assumed to be run from the benchmark root directory
  #############################################################
  def __count_sloc(self):
    log.info("__count_sloc")
    all_frameworks = self.__gather_frameworks()

    jsonResult = {}
    for framework in all_frameworks:
      try:
        # framework is the name string from benchmark_config, which doubles as
        # the framework's directory (the same convention __count_commits uses)
        command = "cloc --list-file=" + framework + "/source_code --yaml"
        lineCount = subprocess.check_output(command, shell=True)
        # Find the last instance of the word 'code' in the yaml output. This should
        # be the line count for the sum of all listed files or just the line count
        # for the last file in the case where there's only one file listed.
        lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
        lineCount = lineCount.strip('code: ')
        lineCount = lineCount[0:lineCount.rfind('comment')]
        jsonResult[framework] = int(lineCount)
      except:
        continue
    self.results['rawData']['slocCounts'] = jsonResult
  ############################################################
  # End __count_sloc
  ############################################################
  ############################################################
  # __count_commits
  ############################################################
  def __count_commits(self):
    log.info("__count_commits")
    all_frameworks = self.__gather_frameworks()

    jsonResult = {}
    for framework in all_frameworks:
      try:
        command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
        commitCount = subprocess.check_output(command, shell=True)
        jsonResult[framework] = int(commitCount)
      except:
        continue
    self.results['rawData']['commitCounts'] = jsonResult
    self.commits = jsonResult
  ############################################################
  # End __count_commits
  ############################################################
  ############################################################
  # __write_intermediate_results
  ############################################################
  def __write_intermediate_results(self, test_name, status_message):
    log.debug("__write_intermediate_results: %s reports %s" % (test_name, status_message))
    try:
      self.results["completed"][test_name] = status_message
      latest_results = os.path.join(self.latest_results_directory, 'results.json')
      log.debug("Dumping to %s: %s" % (latest_results, json.dumps(self.results)))
      with open(latest_results, 'w') as f:
        f.write(json.dumps(self.results))
    except IOError:
      log.error("Error writing results.json")
  ############################################################
  # End __write_intermediate_results
  ############################################################
  def __load_results(self):
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
        self.results = json.load(f)
    except (ValueError, IOError):
      pass

  ############################################################
  # __finish
  ############################################################
  def __finish(self):
    log.info("__finish")
    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
  ############################################################
  # End __finish
  ############################################################
  ##########################################################################################
  # Constructor
  ##########################################################################################

  ############################################################
  # Initialize the benchmarker. The args are the arguments
  # parsed via argparse.
  ############################################################
  def __init__(self, args):
    self.__dict__.update(args)
    self.start_time = time.time()
    self.run_test_timeout_seconds = 3600

    # setup some additional variables
    if self.database_user is None: self.database_user = self.client_user
    if self.database_host is None: self.database_host = self.client_host
    if self.database_identity_file is None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    self.latest_results_directory = self.latest_results_directory()

    if self.parse is not None:
      self.timestamp = self.parse
    else:
      self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max_concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
      self.concurrency_levels.append(concurrency)
      concurrency = concurrency * 2
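    # For illustration (values assumed): with starting_concurrency=8 and
    # max_concurrency=256, the loop above produces [8, 16, 32, 64, 128, 256].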
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
      self.query_intervals.append(queries)
      if queries == 1:
        queries = 0
      queries = queries + self.query_interval
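    # For illustration (values assumed): with max_queries=20 and
    # query_interval=5, the loop above produces [1, 5, 10, 15, 20]; the
    # "queries == 1" reset keeps the sequence on multiples of the interval
    # after the initial single-query level.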
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass

    self.results = None
    try:
      with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
        # Load json file into results object
        self.results = json.load(f)
    except IOError:
      log.warn("results.json for test %s not found.", self.name)

    if self.results is None:
      self.results = dict()
      self.results['name'] = self.name
      self.results['concurrencyLevels'] = self.concurrency_levels
      self.results['queryIntervals'] = self.query_intervals
      self.results['frameworks'] = [t.name for t in self.__gather_tests]
      self.results['duration'] = self.duration
      self.results['rawData'] = dict()
      self.results['rawData']['json'] = dict()
      self.results['rawData']['db'] = dict()
      self.results['rawData']['query'] = dict()
      self.results['rawData']['fortune'] = dict()
      self.results['rawData']['update'] = dict()
      self.results['rawData']['plaintext'] = dict()
      self.results['completed'] = dict()
      self.results['succeeded'] = dict()
      self.results['succeeded']['json'] = []
      self.results['succeeded']['db'] = []
      self.results['succeeded']['query'] = []
      self.results['succeeded']['fortune'] = []
      self.results['succeeded']['update'] = []
      self.results['succeeded']['plaintext'] = []
      self.results['failed'] = dict()
      self.results['failed']['json'] = []
      self.results['failed']['db'] = []
      self.results['failed']['query'] = []
      self.results['failed']['fortune'] = []
      self.results['failed']['update'] = []
      self.results['failed']['plaintext'] = []
      self.results['warning'] = dict()
      self.results['warning']['json'] = []
      self.results['warning']['db'] = []
      self.results['warning']['query'] = []
      self.results['warning']['fortune'] = []
      self.results['warning']['update'] = []
      self.results['warning']['plaintext'] = []
    else:
      #for x in self.__gather_tests():
      #  if x.name not in self.results['frameworks']:
      #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
      # Always overwrite framework list
      self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command strings
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file is not None:
      self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file is not None:
      self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
      install = Installer(self, self.install_strategy)
      install.install_software()
  ############################################################
  # End __init__
  ############################################################
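
# A minimal usage sketch, and an assumption rather than part of the original
# toolset: in the real project a separate run-tests script builds the argument
# dict from argparse and owns the authoritative set of options. The keys below
# are only the attributes this file itself reads, with illustrative values.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  example_args = {
    'name': 'example-run',            # results go under results/example-run/
    'test': None,                     # None means "gather every test"
    'exclude': None,
    'type': 'all',
    'mode': 'benchmark',
    'os': 'linux',
    'database_os': 'linux',
    'sleep': 60,
    'duration': 60,
    'parse': None,
    'install': None,                  # skip the Installer
    'install_strategy': 'unified',
    'starting_concurrency': 8,
    'max_concurrency': 256,
    'max_queries': 20,
    'query_interval': 5,
    'server_host': '127.0.0.1',
    'client_user': 'tfb',
    'client_host': '127.0.0.1',
    'client_identity_file': None,
    'database_user': None,            # falls back to client_user
    'database_host': None,            # falls back to client_host
    'database_identity_file': None,
  }
  benchmarker = Benchmarker(example_args)
  benchmarker.run_list_tests()        # just print the gathered test names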