framework_test.py

import os
import traceback

from requests import ConnectionError, Timeout

from toolset.utils.output_helper import log

# Cross-platform colored text
from colorama import Fore, Style


class FrameworkTest:
    def __init__(self, name, directory, benchmarker, runTests, args):
        '''
        Constructor
        '''
        self.name = name
        self.directory = directory
        self.benchmarker = benchmarker
        self.runTests = runTests
        self.approach = ""
        self.classification = ""
        self.database = ""
        self.framework = ""
        self.language = ""
        self.orm = ""
        self.platform = ""
        self.webserver = ""
        self.os = ""
        self.database_os = ""
        self.display_name = ""
        self.notes = ""
        self.port = ""
        self.versus = ""

        self.__dict__.update(args)
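        # NOTE (illustrative): `args` is expected to hold the per-test entry
        # parsed from a benchmark_config.json file, so the update above
        # overlays the defaults initialized here. A hypothetical example of
        # what it might contain:
        #
        #   {
        #       "approach": "Realistic",
        #       "classification": "Fullstack",
        #       "framework": "gemini",
        #       "language": "Java",
        #       "port": 8080,
        #   }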

    ##########################################################################################
    # Public Methods
    ##########################################################################################
    def start(self):
        '''
        Start the test implementation
        '''
        test_log_dir = os.path.join(self.benchmarker.results.directory,
                                    self.name.lower())
        build_log_dir = os.path.join(test_log_dir, 'build')
        run_log_dir = os.path.join(test_log_dir, 'run')

        # Create the build and run log directories; ignore the OSError raised
        # if they already exist.
        try:
            os.makedirs(build_log_dir)
        except OSError:
            pass
        try:
            os.makedirs(run_log_dir)
        except OSError:
            pass

        # Build the Docker image first; only run the container if the build
        # succeeded.
        result = self.benchmarker.docker_helper.build(self, build_log_dir)
        if result != 0:
            return None

        return self.benchmarker.docker_helper.run(self, run_log_dir)
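    # Illustrative layout of the per-test log directories created by start(),
    # assuming a hypothetical results directory "results/20240101" and a test
    # named "Gemini":
    #
    #   results/20240101/gemini/build/   <- Docker build logs
    #   results/20240101/gemini/run/     <- container run logs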

    def is_accepting_requests(self):
        '''
        Determines whether this test implementation is up and accepting
        requests.
        '''
        # Any configured test type will do; grab the first one and probe its
        # URL from the client machine.
        test_type = None
        for any_type in self.runTests:
            test_type = any_type
            break

        url = "http://%s:%s%s" % (self.benchmarker.config.server_host,
                                  self.port,
                                  self.runTests[test_type].get_url())

        return self.benchmarker.docker_helper.test_client_connection(url)
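    # For example (hypothetical values): a "json" test type on port 8080 with
    # a server host of "tfb-server" would be probed at
    #
    #   http://tfb-server:8080/json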

    def verify_urls(self):
        '''
        Verifies each of the URLs for this test. This will simply curl the URL
        and check its return status. For each URL, a flag will be set on this
        object indicating whether or not it passed.
        Returns True if all verifications succeeded.
        '''
        log_path = os.path.join(self.benchmarker.results.directory,
                                self.name.lower())
        result = True

        def verify_type(test_type):
            # Each test type gets its own verification.txt under the test's
            # log directory.
            verificationPath = os.path.join(log_path, test_type)
            try:
                os.makedirs(verificationPath)
            except OSError:
                pass
            with open(os.path.join(verificationPath, 'verification.txt'),
                      'w') as verification:
                test = self.runTests[test_type]
                log("VERIFYING %s" % test_type.upper(),
                    file=verification,
                    border='-',
                    color=Fore.WHITE + Style.BRIGHT)

                base_url = "http://%s:%s" % (
                    self.benchmarker.config.server_host, self.port)

                try:
                    # Verify headers from the server. This check is made from
                    # the app server using Python's requests module. A second
                    # check is then made from the client to make sure the
                    # server isn't only accepting connections from localhost
                    # on a multi-machine setup.
                    results = test.verify(base_url)

                    # Now verify that the URL is reachable from the client
                    # machine, unless we're already failing.
                    if not any(result == 'fail'
                               for (result, reason, url) in results):
                        self.benchmarker.docker_helper.test_client_connection(
                            base_url + test.get_url())
                except ConnectionError as e:
                    results = [('fail', "Server did not respond to request",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                except Timeout as e:
                    results = [('fail', "Connection to server timed out",
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)
                except Exception as e:
                    # The traceback is embedded in the failure message itself,
                    # so no separate format_exc() call is needed afterwards.
                    results = [('fail', """Caused Exception in TFB
                        This almost certainly means your return value is incorrect,
                        but also that you have found a bug. Please submit an issue
                        including this message: %s\n%s""" %
                                (e, traceback.format_exc()),
                                base_url)]
                    log("Verifying test %s for %s caused an exception: %s" %
                        (test_type, self.name, e),
                        color=Fore.RED)

                # A test type fails if any URL fails, warns if any URL warns,
                # and passes only if every URL passes.
                test.failed = any(
                    result == 'fail' for (result, reason, url) in results)
                test.warned = any(
                    result == 'warn' for (result, reason, url) in results)
                test.passed = all(
                    result == 'pass' for (result, reason, url) in results)

                def output_result(result, reason, url):
                    specific_rules_url = "https://github.com/TechEmpower/FrameworkBenchmarks/wiki/Project-Information-Framework-Tests-Overview#specific-test-requirements"
                    color = Fore.GREEN
                    if result.upper() == "WARN":
                        color = Fore.YELLOW
                    elif result.upper() == "FAIL":
                        color = Fore.RED

                    log("   {!s}{!s}{!s} for {!s}".format(
                        color, result.upper(), Style.RESET_ALL, url),
                        file=verification)
                    if reason is not None and len(reason) != 0:
                        for line in reason.splitlines():
                            log("     " + line, file=verification)
                        if not test.passed:
                            log("     See {!s}".format(specific_rules_url),
                                file=verification)

                for (r1, r2, url) in results:
                    output_result(r1, r2, url)

                if test.failed:
                    test.output_headers_and_body()
                    self.benchmarker.results.report_verify_results(
                        self, test_type, 'fail')
                elif test.warned:
                    test.output_headers_and_body()
                    self.benchmarker.results.report_verify_results(
                        self, test_type, 'warn')
                elif test.passed:
                    self.benchmarker.results.report_verify_results(
                        self, test_type, 'pass')
                else:
                    raise Exception(
                        "Unknown error - test did not pass, warn, or fail")

        # Run verification for every configured test type; the overall result
        # is False if any of them failed.
        for test_type in self.runTests:
            verify_type(test_type)
            if self.runTests[test_type].failed:
                result = False

        return result
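

# Illustrative call sequence (hypothetical, not part of this module): a
# benchmarker that has built a FrameworkTest from a benchmark_config.json
# entry would typically drive it roughly like this.
#
#   container = test.start()               # build the image, start the container
#   if container is not None:
#       while not test.is_accepting_requests():
#           time.sleep(1)                  # poll until the server responds
#       all_passed = test.verify_urls()    # curl each URL, write verification.txt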