# framework_test.py
  1. import os
  2. import subprocess
  3. import sys
  4. import traceback
  5. import logging
  6. from requests import ConnectionError
  7. from toolset.utils.output_helper import header
  8. from toolset.utils import docker_helper
  9. # Cross-platform colored text
  10. from colorama import Fore, Style
  11. class FrameworkTest:
  12. def __init__(self, name, directory, benchmarker_config, results, runTests,
  13. args):
  14. '''
  15. Constructor
  16. '''
  17. self.name = name
  18. self.directory = directory
  19. self.benchmarker_config = benchmarker_config
  20. self.results = results
  21. self.runTests = runTests
  22. self.fwroot = benchmarker_config.fwroot
  23. self.approach = ""
  24. self.classification = ""
  25. self.database = ""
  26. self.framework = ""
  27. self.language = ""
  28. self.orm = ""
  29. self.platform = ""
  30. self.webserver = ""
  31. self.os = ""
  32. self.database_os = ""
  33. self.display_name = ""
  34. self.notes = ""
  35. self.port = ""
  36. self.versus = ""
  37. self.docker_files = None
  38. # setup logging
  39. logging.basicConfig(stream=sys.stderr, level=logging.INFO)
  40. # Used in setup.sh scripts for consistency with
  41. # the bash environment variables
  42. self.troot = self.directory
  43. self.__dict__.update(args)
  44. ##########################################################################################
  45. # Public Methods
  46. ##########################################################################################
  47. def start(self, out, database_container_id):
  48. '''
  49. Start the test implementation
  50. '''
  51. test_docker_files = self.get_docker_files()
  52. result = docker_helper.build(self.benchmarker_config, [self.name], out)
  53. if result != 0:
  54. return result
  55. return docker_helper.run(self.benchmarker_config, test_docker_files,
  56. out)
  57. def is_running(self):
  58. '''
  59. Determines whether this test implementation is up and accepting
  60. requests.
  61. '''
  62. test_type = None
  63. for any_type in self.runTests:
  64. test_type = any_type
  65. break
  66. url = "http://%s:%s/%s" % (self.benchmarker_config.server_host,
  67. self.port,
  68. self.runTests[test_type].get_url())
  69. try:
  70. FNULL = open(os.devnull, 'w')
  71. subprocess.check_call(
  72. ['curl', '-sSfl', url], stdout=FNULL, stderr=subprocess.STDOUT)
  73. except:
  74. return False
  75. return True
  76. def get_docker_files(self):
  77. '''
  78. Returns all the docker_files for this test.
  79. '''
  80. test_docker_files = ["%s.dockerfile" % self.name]
  81. if self.docker_files is not None:
  82. if type(self.docker_files) is list:
  83. test_docker_files.extend(self.docker_files)
  84. else:
  85. raise Exception(
  86. "docker_files in benchmark_config.json must be an array")
  87. return test_docker_files
  88. def verify_urls(self, logPath):
  89. '''
  90. Verifys each of the URLs for this test. This will simply curl the URL and
  91. check for it's return status. For each url, a flag will be set on this
  92. object for whether or not it passed.
  93. Returns True if all verifications succeeded
  94. '''
  95. result = True
  96. def verify_type(test_type):
  97. verificationPath = os.path.join(logPath, test_type)
  98. try:
  99. os.makedirs(verificationPath)
  100. except OSError:
  101. pass
  102. with open(os.path.join(verificationPath, 'verification.txt'),
  103. 'w') as verification:
  104. test = self.runTests[test_type]
  105. test.setup_out(verification)
  106. verification.write(header("VERIFYING %s" % test_type.upper()))
  107. base_url = "http://%s:%s" % (
  108. self.benchmarker_config.server_host, self.port)
  109. try:
  110. # Verifies headers from the server. This check is made from the
  111. # App Server using Pythons requests module. Will do a second check from
  112. # the client to make sure the server isn't only accepting connections
  113. # from localhost on a multi-machine setup.
  114. results = test.verify(base_url)
  115. # Now verify that the url is reachable from the client machine, unless
  116. # we're already failing
  117. if not any(result == 'fail'
  118. for (result, reason, url) in results):
  119. p = subprocess.call(
  120. [
  121. "ssh", self.benchmarker_config.client_host,
  122. "curl -sSf %s" % base_url + test.get_url()
  123. ],
  124. shell=False,
  125. stdout=subprocess.PIPE,
  126. stderr=subprocess.PIPE)
  127. if p is not 0:
  128. results = [(
  129. 'fail',
  130. "Server did not respond to request from client machine.",
  131. base_url)]
  132. logging.warning(
  133. """This error usually means your server is only accepting
  134. requests from localhost.""")
  135. except ConnectionError as e:
  136. results = [('fail', "Server did not respond to request",
  137. base_url)]
  138. logging.warning(
  139. "Verifying test %s for %s caused an exception: %s",
  140. test_type, self.name, e)
  141. except Exception as e:
  142. results = [('fail', """Caused Exception in TFB
  143. This almost certainly means your return value is incorrect,
  144. but also that you have found a bug. Please submit an issue
  145. including this message: %s\n%s""" % (e, traceback.format_exc()),
  146. base_url)]
  147. logging.warning(
  148. "Verifying test %s for %s caused an exception: %s",
  149. test_type, self.name, e)
  150. traceback.format_exc()
  151. test.failed = any(
  152. result == 'fail' for (result, reason, url) in results)
  153. test.warned = any(
  154. result == 'warn' for (result, reason, url) in results)
  155. test.passed = all(
  156. result == 'pass' for (result, reason, url) in results)
  157. def output_result(result, reason, url):
  158. specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
  159. color = Fore.GREEN
  160. if result.upper() == "WARN":
  161. color = Fore.YELLOW
  162. elif result.upper() == "FAIL":
  163. color = Fore.RED
  164. verification.write((
  165. " " + color + "%s" + Style.RESET_ALL + " for %s\n") %
  166. (result.upper(), url))
  167. print(" {!s}{!s}{!s} for {!s}\n".format(
  168. color, result.upper(), Style.RESET_ALL, url))
  169. if reason is not None and len(reason) != 0:
  170. for line in reason.splitlines():
  171. verification.write(" " + line + '\n')
  172. print(" " + line)
  173. if not test.passed:
  174. verification.write(
  175. " See %s\n" % specific_rules_url)
  176. print(" See {!s}\n".format(specific_rules_url))
  177. [output_result(r1, r2, url) for (r1, r2, url) in results]
  178. if test.failed:
  179. self.results.report_verify_results(self, test_type, 'fail')
  180. elif test.warned:
  181. self.results.report_verify_results(self, test_type, 'warn')
  182. elif test.passed:
  183. self.results.report_verify_results(self, test_type, 'pass')
  184. else:
  185. raise Exception(
  186. "Unknown error - test did not pass,warn,or fail")
  187. verification.flush()
  188. result = True
  189. for test_type in self.runTests:
  190. verify_type(test_type)
  191. if self.runTests[test_type].failed:
  192. result = False
  193. return result