# framework_test.py
  1. import os
  2. import traceback
  3. from requests import ConnectionError, Timeout
  4. from toolset.utils.output_helper import Logger
  5. # Cross-platform colored text
  6. from colorama import Fore, Style
  7. class FrameworkTest:
  8. def __init__(self, name, directory, benchmarker, runTests,
  9. args):
  10. '''
  11. Constructor
  12. '''
  13. self.name = name
  14. self.directory = directory
  15. self.benchmarker = benchmarker
  16. self.runTests = runTests
  17. self.approach = ""
  18. self.classification = ""
  19. self.database = ""
  20. self.framework = ""
  21. self.language = ""
  22. self.orm = ""
  23. self.platform = ""
  24. self.webserver = ""
  25. self.os = ""
  26. self.database_os = ""
  27. self.display_name = ""
  28. self.notes = ""
  29. self.port = ""
  30. self.versus = ""
  31. self.log = Logger().log
  32. self.__dict__.update(args)
  33. ##########################################################################################
  34. # Public Methods
  35. ##########################################################################################
  36. def start(self):
  37. '''
  38. Start the test implementation
  39. '''
  40. test_log_dir = os.path.join(self.benchmarker.results.directory, self.name.lower())
  41. build_log_dir = os.path.join(test_log_dir, 'build')
  42. run_log_dir = os.path.join(test_log_dir, 'run')
  43. try:
  44. os.makedirs(build_log_dir)
  45. except OSError:
  46. pass
  47. try:
  48. os.makedirs(run_log_dir)
  49. except OSError:
  50. pass
  51. result = self.benchmarker.docker_helper.build(self, build_log_dir)
  52. if result != 0:
  53. return None
  54. return self.benchmarker.docker_helper.run(self, run_log_dir)
  55. def is_accepting_requests(self):
  56. '''
  57. Determines whether this test implementation is up and accepting
  58. requests.
  59. '''
  60. test_type = None
  61. for any_type in self.runTests:
  62. test_type = any_type
  63. break
  64. url = "http://%s:%s%s" % (self.benchmarker.config.server_host,
  65. self.port,
  66. self.runTests[test_type].get_url())
  67. return self.benchmarker.docker_helper.test_client_connection(url)
  68. def verify_urls(self):
  69. '''
  70. Verifys each of the URLs for this test. This will simply curl the URL and
  71. check for it's return status. For each url, a flag will be set on this
  72. object for whether or not it passed.
  73. Returns True if all verifications succeeded
  74. '''
  75. log_path = os.path.join(self.benchmarker.results.directory, self.name.lower())
  76. result = True
  77. def verify_type(test_type):
  78. verificationPath = os.path.join(log_path, test_type)
  79. try:
  80. os.makedirs(verificationPath)
  81. except OSError:
  82. pass
  83. with open(os.path.join(verificationPath, 'verification.txt'),
  84. 'w') as verification:
  85. test = self.runTests[test_type]
  86. self.log("VERIFYING %s" % test_type.upper(),
  87. file=verification,
  88. border='-',
  89. color=Fore.WHITE + Style.BRIGHT)
  90. base_url = "http://%s:%s" % (
  91. self.benchmarker.config.server_host, self.port)
  92. try:
  93. # Verifies headers from the server. This check is made from the
  94. # App Server using Pythons requests module. Will do a second check from
  95. # the client to make sure the server isn't only accepting connections
  96. # from localhost on a multi-machine setup.
  97. results = test.verify(base_url)
  98. # Now verify that the url is reachable from the client machine, unless
  99. # we're already failing
  100. if not any(result == 'fail'
  101. for (result, reason, url) in results):
  102. self.benchmarker.docker_helper.test_client_connection(
  103. base_url + test.get_url())
  104. except ConnectionError as e:
  105. results = [('fail', "Server did not respond to request",
  106. base_url)]
  107. self.log("Verifying test %s for %s caused an exception: %s" %
  108. (test_type, self.name, e),
  109. color=Fore.RED)
  110. except Timeout as e:
  111. results = [('fail', "Connection to server timed out",
  112. base_url)]
  113. self.log("Verifying test %s for %s caused an exception: %s" %
  114. (test_type, self.name, e),
  115. color=Fore.RED)
  116. except Exception as e:
  117. results = [('fail', """Caused Exception in TFB
  118. This almost certainly means your return value is incorrect,
  119. but also that you have found a bug. Please submit an issue
  120. including this message: %s\n%s""" % (e, traceback.format_exc()),
  121. base_url)]
  122. self.log("Verifying test %s for %s caused an exception: %s" %
  123. (test_type, self.name, e),
  124. color=Fore.RED)
  125. traceback.format_exc()
  126. test.failed = any(
  127. result == 'fail' for (result, reason, url) in results)
  128. test.warned = any(
  129. result == 'warn' for (result, reason, url) in results)
  130. test.passed = all(
  131. result == 'pass' for (result, reason, url) in results)
  132. def output_result(result, reason, url):
  133. specific_rules_url = "https://github.com/TechEmpower/FrameworkBenchmarks/wiki/Project-Information-Framework-Tests-Overview#specific-test-requirements"
  134. color = Fore.GREEN
  135. if result.upper() == "WARN":
  136. color = Fore.YELLOW
  137. elif result.upper() == "FAIL":
  138. color = Fore.RED
  139. self.log(" {!s}{!s}{!s} for {!s}".format(
  140. color, result.upper(), Style.RESET_ALL, url),
  141. file=verification)
  142. if reason is not None and len(reason) != 0:
  143. for line in reason.splitlines():
  144. self.log(" " + line, file=verification)
  145. if not test.passed:
  146. self.log(" See {!s}".format(specific_rules_url),
  147. file=verification)
  148. [output_result(r1, r2, url) for (r1, r2, url) in results]
  149. if test.failed:
  150. test.output_headers_and_body()
  151. self.benchmarker.results.report_verify_results(self, test_type, 'fail')
  152. elif test.warned:
  153. test.output_headers_and_body()
  154. self.benchmarker.results.report_verify_results(self, test_type, 'warn')
  155. elif test.passed:
  156. self.benchmarker.results.report_verify_results(self, test_type, 'pass')
  157. else:
  158. raise Exception(
  159. "Unknown error - test did not pass,warn,or fail")
  160. result = True
  161. for test_type in self.runTests:
  162. verify_type(test_type)
  163. if self.runTests[test_type].failed:
  164. result = False
  165. return result