  1. """
  2. Copyright (c) Contributors to the Open 3D Engine Project.
  3. For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. SPDX-License-Identifier: Apache-2.0 OR MIT
  5. This suite contains the tests for editor_test utilities.
  6. """
  7. import pytest
  8. import os
  9. import sys
  10. import importlib
  11. import unittest.mock as mock
  12. import ly_test_tools
  13. import ly_test_tools.environment.process_utils as process_utils
  14. from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite
  15. from ly_test_tools.o3de.multi_test_framework import Result
  16. from ly_test_tools.o3de.asset_processor import AssetProcessor
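
# The EditorTest_That_* helper modules referenced below are expected to live next to this file;
# appending this directory to sys.path makes them importable by module name.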
sys.path.append(os.path.dirname(os.path.abspath(__file__)))


def get_editor_launcher_platform():
    if ly_test_tools.WINDOWS:
        return "windows_editor"
    elif ly_test_tools.LINUX:
        return "linux_editor"
    else:
        return None


# Other plugins can create cross-object reference issues due to these tests executing nonstandard pytest-within-pytest
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_ownership', mock.MagicMock())
@pytest.mark.parametrize("launcher_platform", [get_editor_launcher_platform()])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestEditorTest:

    args = []
    path = None

    @classmethod
    def setup_class(cls):
        # Copy all args except for the python interpreter and module file
        for arg in sys.argv:
            if not arg.endswith(".py"):
                TestEditorTest.args.append(arg)
        if "--build-directory" in TestEditorTest.args:
            # passed as two args, flag and value
            build_dir_arg_index = TestEditorTest.args.index("--build-directory")
            TestEditorTest.args[build_dir_arg_index + 1] = os.path.abspath(
                TestEditorTest.args[build_dir_arg_index + 1])
        else:
            # may instead be passed as one arg which includes an equals-sign between flag and value
            build_dir_arg_index = 0
            for arg in TestEditorTest.args:
                if arg.startswith("--build-directory"):
                    first, second = arg.split("=", maxsplit=1)
                    TestEditorTest.args[build_dir_arg_index] = f'{first}={os.path.abspath(second)}'
                    break
                build_dir_arg_index += 1
            if build_dir_arg_index == len(TestEditorTest.args):
                raise ValueError(f"Must pass --build-directory argument in order to run this test. "
                                 f"Found args: {TestEditorTest.args}")
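        # "-s" disables output capturing in the nested pytest runs launched below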
        TestEditorTest.args.append("-s")
        TestEditorTest.path = os.path.dirname(os.path.abspath(__file__))
        cls._asset_processor = None

    # Custom cleanup
    def teardown_class(cls):
        if cls._asset_processor:
            cls._asset_processor.stop(1)
            cls._asset_processor.teardown()

    # Test runs #
    @classmethod
    def _run_single_test(cls, pytester, workspace, module_name):
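        """
        Generates a temporary EditorTestSuite module via pytester that wraps the given test module as
        an EditorSingleTest, runs it in-process, and returns (extracted Result, pytester RunResult).
        """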
        # Keep the AP open for all tests
        if cls._asset_processor is None:
            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
                cls._asset_processor = AssetProcessor(workspace)
                cls._asset_processor.start()

        pytester.makepyfile(
            f"""
            import pytest
            import os
            import sys

            from ly_test_tools import LAUNCHERS
            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite

            @pytest.mark.SUITE_main
            @pytest.mark.parametrize("launcher_platform", ['{get_editor_launcher_platform()}'])
            @pytest.mark.parametrize("project", ["AutomatedTesting"])
            class TestAutomation(EditorTestSuite):
                class test_single(EditorSingleTest):
                    import {module_name} as test_module
            """)
        result = pytester.runpytest(*TestEditorTest.args)
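
        # Rebuild an equivalent EditorSingleTest class locally so the suite's result parser knows
        # which test to look for in the captured output.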
        def get_class(module_name):
            class test_single(EditorSingleTest):
                test_module = importlib.import_module(module_name)
            return test_single

        output = "".join(result.outlines)
        extracted_results = EditorTestSuite._get_results_using_output([get_class(module_name)], output, output)
        extracted_result = next(iter(extracted_results.items()))
        return extracted_result[1], result

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_single_pass_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Passes")
        result.assert_outcomes(passed=1)
        assert isinstance(extracted_result, Result.Pass)

    def test_single_fail_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Fails")
        result.assert_outcomes(failed=1)
        assert isinstance(extracted_result, Result.Fail)

    def test_single_crash_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Crashes")
        result.assert_outcomes(failed=1)
        # TODO: With the Python 3.10.5 update, a crashed test results in a Fail on Windows but an Unknown on Linux;
        # we will need to investigate the appropriate assertion here
        assert isinstance(extracted_result, Result.Unknown) or isinstance(extracted_result, Result.Fail)

    @classmethod
    def _run_shared_test(cls, pytester, workspace, module_class_code, extra_cmd_line=None):
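        """
        Generates a temporary EditorTestSuite module via pytester containing the given test class
        definitions, runs it with any extra command-line arguments, and returns the pytester RunResult.
        """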
        if not extra_cmd_line:
            extra_cmd_line = []

        # Keep the AP open for all tests
        if cls._asset_processor is None:
            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
                cls._asset_processor = AssetProcessor(workspace)
                cls._asset_processor.start()

        pytester.makepyfile(
            f"""
            import pytest
            import os
            import sys

            from ly_test_tools import LAUNCHERS
            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite

            @pytest.mark.SUITE_main
            @pytest.mark.parametrize("launcher_platform", ['{get_editor_launcher_platform()}'])
            @pytest.mark.parametrize("project", ["AutomatedTesting"])
            class TestAutomation(EditorTestSuite):
                {module_class_code}
            """)
        result = pytester.runpytest(*TestEditorTest.args + extra_cmd_line)
        return result

    # Here and throughout, the batch/parallel runner counts towards pytest's passes, so we must include it in the asserts
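    # is_parallelizable=False keeps these shared tests out of the parallel runner, so they go through the batched runner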
    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_batched_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
                    is_parallelizable = False
            """
        )
        # 2 passes + 1 (batch runner)
        result.assert_outcomes(passed=3)

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_batched_1_pass_1_fail(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_parallelizable = False
            """
        )
        # 1 fail, 1 pass + 1 (batch runner)
        result.assert_outcomes(passed=2, failed=1)

    def test_batched_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_parallelizable = False

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
                    is_parallelizable = False
            """
        )
        # 2 fails, 1 pass + 1 (batch runner)
        result.assert_outcomes(passed=2, failed=2)
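
    # is_batchable=False keeps these shared tests out of the batched runner, so they go through the parallel runner instead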
    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_parallel_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass_1(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_batchable = False

                class test_pass_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
                    is_batchable = False
            """
        )
        # 2 passes + 1 (parallel runner)
        result.assert_outcomes(passed=3)

    def test_parallel_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_batchable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_batchable = False

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
                    is_batchable = False
            """
        )
        # 2 fails, 1 pass + 1 (parallel runner)
        result.assert_outcomes(passed=2, failed=2)
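
    # With is_batchable and is_parallelizable both left at their defaults, these shared tests go through the combined batched+parallel runner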
    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_parallel_batched_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass_1(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_pass_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
            """
        )
        # 2 passes + 1 (batched+parallel runner)
        result.assert_outcomes(passed=3)

    def test_parallel_batched_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
            """
        )
        # 2 fails, 1 pass + 1 (batched+parallel runner)
        result.assert_outcomes(passed=2, failed=2)
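
    # "-k fail" selects only the test whose name contains "fail"; the other two generated tests are deselected by pytest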
    def test_selection_2_deselected_1_selected(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
            """, extra_cmd_line=["-k", "fail"]
        )
        # 1 fail + 1 pass (parallel runner)
        result.assert_outcomes(failed=1, passed=1)
        outcomes = result.parseoutcomes()
        deselected = outcomes.get("deselected")
        assert deselected == 2