framework_test.py

import os
import subprocess
import traceback

from requests import ConnectionError

from toolset.utils.output_helper import header, log, FNULL
from toolset.utils import docker_helper

# Cross-platform colored text
from colorama import Fore, Style


class FrameworkTest:
    def __init__(self, name, directory, benchmarker_config, results, runTests,
                 args):
        '''
        Constructor
        '''
        self.name = name
        self.directory = directory
        self.benchmarker_config = benchmarker_config
        self.results = results
        self.runTests = runTests
        self.fwroot = benchmarker_config.fwroot
        self.approach = ""
        self.classification = ""
        self.database = ""
        self.framework = ""
        self.language = ""
        self.orm = ""
        self.platform = ""
        self.webserver = ""
        self.os = ""
        self.database_os = ""
        self.display_name = ""
        self.notes = ""
        self.port = ""
        self.versus = ""
        self.docker_files = None

        # Used in setup.sh scripts for consistency with
        # the bash environment variables
        self.troot = self.directory

        # Merge in the attributes supplied for this test (the keys from its
        # benchmark_config.json entry), overwriting the defaults above
        self.__dict__.update(args)

    ##########################################################################################
    # Public Methods
    ##########################################################################################

    def start(self, database_container_id):
        '''
        Start the test implementation
        '''
        test_docker_files = self.get_docker_files()
        test_log_dir = os.path.join(self.results.directory, self.name.lower())
        build_log_dir = os.path.join(test_log_dir, 'build')
        run_log_dir = os.path.join(test_log_dir, 'run')

        # The log directories may already exist from a previous run
        try:
            os.makedirs(build_log_dir)
        except OSError:
            pass
        try:
            os.makedirs(run_log_dir)
        except OSError:
            pass

        result = docker_helper.build(self.benchmarker_config, [self.name],
                                     build_log_dir)
        if result != 0:
            return result

        return docker_helper.run(self.benchmarker_config, test_docker_files,
                                 run_log_dir)
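
    # Note: build logs are collected under results/<test name>/build and run
    # logs under results/<test name>/run, the directories created in start()
    # above (assuming docker_helper writes its logs to the directories it is
    # given).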

    def is_running(self):
        '''
        Determines whether this test implementation is up and accepting
        requests.
        '''
        # Any one of the configured test types will do as a liveness probe
        test_type = None
        for any_type in self.runTests:
            test_type = any_type
            break

        # get_url() already includes the leading slash (see verify_urls below)
        url = "http://%s:%s%s" % (self.benchmarker_config.server_host,
                                  self.port,
                                  self.runTests[test_type].get_url())
        try:
            subprocess.check_call(
                ['curl', '-sSfl', url], stdout=FNULL, stderr=subprocess.STDOUT)
        except Exception:
            return False
        return True
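
    # For reference, the liveness check above is roughly equivalent to running
    # something like:
    #
    #     curl -sSfl http://<server_host>:<port>/json
    #
    # where the path is illustrative; the real path comes from get_url() of
    # whichever test type is checked.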

    def get_docker_files(self):
        '''
        Returns all the docker_files for this test.
        '''
        test_docker_files = ["%s.dockerfile" % self.name]
        if self.docker_files is not None:
            if isinstance(self.docker_files, list):
                test_docker_files.extend(self.docker_files)
            else:
                raise Exception(
                    "docker_files in benchmark_config.json must be an array")
        return test_docker_files
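
    # Illustrative example (the test name "foo" is hypothetical): if a test's
    # benchmark_config.json entry declares
    #
    #     "docker_files": ["foo-mysql.dockerfile"]
    #
    # this method returns ["foo.dockerfile", "foo-mysql.dockerfile"].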

    def verify_urls(self):
        '''
        Verifies each of the URLs for this test. This will simply curl the URL
        and check for its return status. For each URL, a flag will be set on
        this object indicating whether or not it passed.
        Returns True if all verifications succeeded.
        '''
        log_path = os.path.join(self.results.directory, self.name.lower())

        def verify_type(test_type):
            verificationPath = os.path.join(log_path, test_type)
            try:
                os.makedirs(verificationPath)
            except OSError:
                pass
            with open(os.path.join(verificationPath, 'verification.txt'),
                      'w') as verification:
                test = self.runTests[test_type]
                header(
                    message="VERIFYING %s" % test_type.upper(),
                    log_file=verification)

                base_url = "http://%s:%s" % (
                    self.benchmarker_config.server_host, self.port)

                try:
                    # Verifies headers from the server. This check is made from
                    # the app server using Python's requests module. A second
                    # check is then made from the client to make sure the
                    # server isn't only accepting connections from localhost on
                    # a multi-machine setup.
                    results = test.verify(base_url)

                    # Now verify that the URL is reachable from the client
                    # machine, unless we're already failing
                    if not any(result == 'fail'
                               for (result, reason, url) in results):
                        p = subprocess.call(
                            [
                                "ssh", self.benchmarker_config.client_host,
                                "curl -sSf %s" % base_url + test.get_url()
                            ],
                            shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
                        if p != 0:
                            results = [(
                                'fail',
                                "Server did not respond to request from client machine.",
                                base_url)]
                            log("""This error usually means your server is only accepting
                                requests from localhost.""")
                except ConnectionError as e:
                    results = [('fail', "Server did not respond to request",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e))
                except Exception as e:
                    results = [('fail', """Caused Exception in TFB
                        This almost certainly means your return value is incorrect,
                        but also that you have found a bug. Please submit an issue
                        including this message: %s\n%s""" % (e, traceback.format_exc()),
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e))

                test.failed = any(
                    result == 'fail' for (result, reason, url) in results)
                test.warned = any(
                    result == 'warn' for (result, reason, url) in results)
                test.passed = all(
                    result == 'pass' for (result, reason, url) in results)

                def output_result(result, reason, url):
                    specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
                    color = Fore.GREEN
                    if result.upper() == "WARN":
                        color = Fore.YELLOW
                    elif result.upper() == "FAIL":
                        color = Fore.RED

                    log("  {!s}{!s}{!s} for {!s}".format(
                        color, result.upper(), Style.RESET_ALL, url), None,
                        verification)
                    if reason is not None and len(reason) != 0:
                        for line in reason.splitlines():
                            log("  " + line, None, verification)
                        if not test.passed:
                            log("  See {!s}".format(specific_rules_url),
                                None, verification)

                [output_result(r1, r2, url) for (r1, r2, url) in results]

                if test.failed:
                    self.results.report_verify_results(self, test_type, 'fail')
                elif test.warned:
                    self.results.report_verify_results(self, test_type, 'warn')
                elif test.passed:
                    self.results.report_verify_results(self, test_type, 'pass')
                else:
                    raise Exception(
                        "Unknown error - test did not pass, warn, or fail")

        result = True
        for test_type in self.runTests:
            verify_type(test_type)
            if self.runTests[test_type].failed:
                result = False

        return result
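
# A minimal usage sketch (names and the surrounding objects are assumptions;
# in practice these calls are driven by the surrounding toolset):
#
#     test = FrameworkTest("foo", "/path/to/foo", benchmarker_config, results,
#                          run_tests, test_args)
#     if test.start(database_container_id) == 0:
#         while not test.is_running():
#             time.sleep(1)
#         all_passed = test.verify_urls()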