  1. """
  2. Copyright (c) Contributors to the Open 3D Engine Project.
  3. For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. SPDX-License-Identifier: Apache-2.0 OR MIT
  5. This suite contains the tests for editor_test utilities.
  6. """
  7. import pytest
  8. import os
  9. import sys
  10. import importlib
  11. import unittest.mock as mock
  12. import ly_test_tools
  13. import ly_test_tools.environment.process_utils as process_utils
  14. from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite
  15. from ly_test_tools.o3de.multi_test_framework import Result
  16. from ly_test_tools.o3de.asset_processor import AssetProcessor
  17. sys.path.append(os.path.dirname(os.path.abspath(__file__)))
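# The EditorTest_That_* helper scripts used by the tests below are expected to live alongside this
# file; appending this directory to sys.path lets both the generated test modules and
# importlib.import_module() resolve them.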


def get_editor_launcher_platform():
    if ly_test_tools.WINDOWS:
        return "windows_editor"
    elif ly_test_tools.LINUX:
        return "linux_editor"
    else:
        return None


# Other plugins can create cross-object reference issues due to these tests executing nonstandard pytest-within-pytest
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_ownership', mock.MagicMock())
@pytest.mark.parametrize("launcher_platform", [get_editor_launcher_platform()])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestEditorTest:

    args = []
    assetprocessor_extra_params = None
    path = None

    @classmethod
    def setup_class(cls):
        # Copy all args except for the python interpreter and module file
        for arg in sys.argv:
            if not arg.endswith(".py"):
                TestEditorTest.args.append(arg)

        if "--build-directory" in TestEditorTest.args:
            # passed as two args, flag and value
            build_dir_arg_index = TestEditorTest.args.index("--build-directory")
            TestEditorTest.args[build_dir_arg_index + 1] = os.path.abspath(
                TestEditorTest.args[build_dir_arg_index + 1])
        else:
            # may instead be passed as one arg which includes an equals-sign between flag and value
            build_dir_arg_index = 0
            for arg in TestEditorTest.args:
                if arg.startswith("--build-directory"):
                    first, second = arg.split("=", maxsplit=1)
                    TestEditorTest.args[build_dir_arg_index] = f'{first}={os.path.abspath(second)}'
                    break
                build_dir_arg_index += 1
            if build_dir_arg_index == len(TestEditorTest.args):
                raise ValueError(f"Must pass --build-directory argument in order to run this test. Found args: {TestEditorTest.args}")

        TestEditorTest.args.append("-s")
        TestEditorTest.path = os.path.dirname(os.path.abspath(__file__))
        cls._asset_processor = None
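
    # For reference, this suite is driven by the standard ly_test_tools pytest arguments; a typical
    # invocation (with a hypothetical build path) looks roughly like:
    #   python -m pytest TestSuite_Main.py --build-directory /path/to/o3de/build/bin/profile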

    # Custom cleanup
    def teardown_class(cls):
        if cls._asset_processor:
            cls._asset_processor.stop(1)
            cls._asset_processor.teardown()

    # Test runs #
    @classmethod
    def _run_single_test(cls, pytester, workspace, module_name):
        # Keep the AP open for all tests
        if cls._asset_processor is None:
            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
                cls._asset_processor = AssetProcessor(workspace)
                cls._asset_processor.start(extra_params=cls.assetprocessor_extra_params)

        pytester.makepyfile(
            f"""
            import pytest
            import os
            import sys

            from ly_test_tools import LAUNCHERS
            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite

            @pytest.mark.SUITE_main
            @pytest.mark.parametrize("launcher_platform", ['{get_editor_launcher_platform()}'])
            @pytest.mark.parametrize("project", ["AutomatedTesting"])
            class TestAutomation(EditorTestSuite):
                class test_single(EditorSingleTest):
                    import {module_name} as test_module
            """)
        result = pytester.runpytest(*TestEditorTest.args)

        def get_class(module_name):
            class test_single(EditorSingleTest):
                test_module = importlib.import_module(module_name)
            return test_single

        output = "".join(result.outlines)
        extracted_results = EditorTestSuite._get_results_using_output([get_class(module_name)], output, output)
        extracted_result = next(iter(extracted_results.items()))
        return extracted_result[1], result
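
    # For reference, after pytester.makepyfile() dedents the snippet above, the generated module
    # looks roughly like the following (the module name shown is illustrative):
    #
    #   class TestAutomation(EditorTestSuite):
    #       class test_single(EditorSingleTest):
    #           import EditorTest_That_Passes as test_module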

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_single_pass_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Passes")
        result.assert_outcomes(passed=1)
        assert isinstance(extracted_result, Result.Pass)

    def test_single_fail_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Fails")
        result.assert_outcomes(failed=1)
        assert isinstance(extracted_result, Result.Fail)

    def test_single_crash_test(self, request, workspace, launcher_platform, pytester):
        (extracted_result, result) = TestEditorTest._run_single_test(pytester, workspace, "EditorTest_That_Crashes")
        result.assert_outcomes(failed=1)
        # TODO: With the Python 3.10.5 update, a crashed test results in a Fail on Windows but in an Unknown on Linux.
        # We will need to investigate the appropriate assertion here.
        assert isinstance(extracted_result, (Result.Unknown, Result.Fail))

    @classmethod
    def _run_shared_test(cls, pytester, workspace, module_class_code, extra_cmd_line=None):
        if not extra_cmd_line:
            extra_cmd_line = []

        # Keep the AP open for all tests
        if cls._asset_processor is None:
            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
                cls._asset_processor = AssetProcessor(workspace)
                cls._asset_processor.start(extra_params=cls.assetprocessor_extra_params)

        pytester.makepyfile(
            f"""
            import pytest
            import os
            import sys

            from ly_test_tools import LAUNCHERS
            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite

            @pytest.mark.SUITE_main
            @pytest.mark.parametrize("launcher_platform", ['{get_editor_launcher_platform()}'])
            @pytest.mark.parametrize("project", ["AutomatedTesting"])
            class TestAutomation(EditorTestSuite):
                {module_class_code}
            """)
        result = pytester.runpytest(*TestEditorTest.args + extra_cmd_line)
        return result
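
    # Note: the module_class_code snippets passed in by the tests below are indented one level
    # deeper than the template body above, so that once makepyfile() dedents the generated source
    # each EditorSharedTest subclass ends up nested inside TestAutomation.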

    # Here and throughout, the batch/parallel runner counts towards pytest's Passes, so we must include it in the asserts
    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_batched_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
                    is_parallelizable = False
            """
        )
        # 2 Passes + 1 (batch runner)
        result.assert_outcomes(passed=3)

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_batched_1_pass_1_fail(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_parallelizable = False
            """
        )
        # 1 Fail, 1 Pass + 1 (batch runner)
        result.assert_outcomes(passed=2, failed=1)

    def test_batched_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_parallelizable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_parallelizable = False

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
                    is_parallelizable = False
            """
        )
        # 2 Fails, 1 Pass + 1 (batch runner)
        result.assert_outcomes(passed=2, failed=2)

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_parallel_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass_1(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_batchable = False

                class test_pass_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
                    is_batchable = False
            """
        )
        # 2 Passes + 1 (parallel runner)
        result.assert_outcomes(passed=3)

    def test_parallel_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module
                    is_batchable = False

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module
                    is_batchable = False

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
                    is_batchable = False
            """
        )
        # 2 Fails, 1 Pass + 1 (parallel runner)
        result.assert_outcomes(passed=2, failed=2)

    @pytest.mark.skip(reason="Skipped for test efficiency, but keeping for reference.")
    def test_parallel_batched_2_pass(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass_1(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_pass_2(EditorSharedTest):
                    import EditorTest_That_PassesToo as test_module
            """
        )
        # 2 Passes + 1 (batched+parallel runner)
        result.assert_outcomes(passed=3)

    def test_parallel_batched_1_pass_1_fail_1_crash(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
            """
        )
        # 2 Fails, 1 Pass + 1 (batched+parallel runner)
        result.assert_outcomes(passed=2, failed=2)

    def test_selection_2_deselected_1_selected(self, request, workspace, launcher_platform, pytester):
        result = self._run_shared_test(pytester, workspace,
            """
                class test_pass(EditorSharedTest):
                    import EditorTest_That_Passes as test_module

                class test_fail(EditorSharedTest):
                    import EditorTest_That_Fails as test_module

                class test_crash(EditorSharedTest):
                    import EditorTest_That_Crashes as test_module
            """, extra_cmd_line=["-k", "fail"]
        )
        # 1 Fail + 1 Success (parallel runner)
        result.assert_outcomes(failed=1, passed=1)
        outcomes = result.parseoutcomes()
        deselected = outcomes.get("deselected")
        assert deselected == 2
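        # The "-k fail" expression keeps only the test_fail case; the other two cases are
        # presumably what pytest reports as the two deselected items counted above.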