
(Continuation) Implemented automation parallelization & standardization (#1718)

Engine improvements/fixes

Fixed behavior that could cause automated editor tests to get stuck when the editor lost focus.
Added support for passing multiple tests to the editor in a single batch by supplying --runpythontest with the test scripts separated by ';' (see the invocation sketch after this list).
Added a new command-line argument, --project-user-path, for overriding the user path. This allows multiple editor instances to run at the same time while writing their logs and crash logs to different locations.
Moved the responsibility for exiting after a test finishes/passes out of ExecuteByFilenameAsTest; callers now use the bool return value to know whether the test passed.
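
A rough sketch of how such an invocation might look, assuming a Windows profile build; the executable location and test script paths below are placeholders, not part of this change:

```python
# Hypothetical invocation sketch: run two editor tests in one batch while isolating
# this editor instance's logs and crash logs under its own user path.
# All paths below are placeholders.
import subprocess

editor_exe = r"D:\o3de\build\bin\profile\Editor.exe"          # assumed build output location
tests = ";".join([
    r"D:\o3de\AutomatedTesting\tests\EditorTest_That_Passes.py",
    r"D:\o3de\AutomatedTesting\tests\EditorTest_That_PassesToo.py",
])

result = subprocess.run([
    editor_exe,
    "--runpythontest", tests,                   # multiple tests separated by ';'
    "--project-user-path", r"D:\o3de_user_1",   # per-instance user path for logs/crash logs
])

# Per the CryEdit.cpp changes below, the editor closes gracefully when every test passes
# and terminates with exit code 0xF when a test fails.
print("passed" if result.returncode == 0 else f"failed (exit code {result.returncode})")
```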
Editor test batching and parallelization implementation:

The external Python portion of the editor tests is now specified via test specs, which generate the tests automatically and require no extra code; this is essentially a data-driven approach.
Tests can be specified as single, parallel, batchable, or batchable+parallel (see the example spec after this list).
Added pytest command-line arguments to override the maximum number of editors and to disable parallelization or batching.
Added automated tests covering the new editor testing utility.
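
For reference, a hedged sketch of what such a test spec looks like, modeled on the suites added in this change (the MyTest_* module names are placeholders):

```python
# Sketch of a data-driven editor test suite; the MyTest_* modules are placeholders.
import pytest

from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite


@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ["windows_editor"])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(EditorTestSuite):

    # Runs alone in its own editor instance.
    class MyTest_RunsAlone(EditorSingleTest):
        from . import MyTest_RunsAlone as test_module

    # Batchable + parallelizable (the default for shared tests).
    class MyTest_Shared(EditorSharedTest):
        from . import MyTest_Shared as test_module

    # Parallel only: opt out of batching.
    class MyTest_ParallelOnly(EditorSharedTest):
        from . import MyTest_ParallelOnly as test_module
        is_batchable = False

    # Batched only: opt out of parallelization.
    class MyTest_BatchedOnly(EditorSharedTest):
        from . import MyTest_BatchedOnly as test_module
        is_parallelizable = False
```

Batching and parallelization can then be tuned from the pytest command line with the --no-editor-batch, --no-editor-parallel, and --parallel-editors options added in Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py below.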

Signed-off-by: Garcia Ruiz <[email protected]>

Co-authored-by: Garcia Ruiz <[email protected]>
AMZN-AlexOteiza 4 years ago
parent
commit
b815c203da
32 changed files with 1653 additions and 111 deletions
  1. + 83 - 35   AutomatedTesting/Gem/PythonTests/EditorPythonTestTools/editor_python_test_tools/utils.py
  2. + 14 - 0    AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Crashes.py
  3. + 13 - 0    AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Fails.py
  4. + 15 - 0    AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Passes.py
  5. + 15 - 0    AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_PassesToo.py
  6. + 262 - 0   AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py
  7. + 6 - 0     AutomatedTesting/Gem/PythonTests/editor_test_testing/__init__.py
  8. + 8 - 0     AutomatedTesting/Gem/PythonTests/editor_test_testing/conftest.py
  9. + 0 - 2     AutomatedTesting/Gem/PythonTests/physics/C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC.py
  10. + 4 - 2    AutomatedTesting/Gem/PythonTests/physics/C12712453_ScriptCanvas_MultipleRaycastNode.py
  11. + 6 - 2    AutomatedTesting/Gem/PythonTests/physics/C17411467_AddPhysxRagdollComponent.py
  12. + 0 - 1    AutomatedTesting/Gem/PythonTests/physics/C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain.py
  13. + 3 - 1    AutomatedTesting/Gem/PythonTests/physics/C4982803_Enable_PxMesh_Option.py
  14. + 105 - 0  AutomatedTesting/Gem/PythonTests/physics/TestSuite_Main_Test.py
  15. + 5 - 4    AutomatedTesting/Gem/PythonTests/prefab/PrefabLevel_OpensLevelWithEntities.py
  16. + 74 - 14  Code/Editor/CryEdit.cpp
  17. + 2 - 7    Code/Editor/MainWindow.cpp
  18. + 25 - 3   Code/Framework/AzCore/AzCore/Settings/SettingsRegistryMergeUtils.cpp
  19. + 4 - 0    Code/Framework/AzCore/AzCore/Settings/SettingsRegistryMergeUtils.h
  20. + 13 - 15  Code/Framework/AzFramework/AzFramework/Application/Application.cpp
  21. + 2 - 1    Code/Framework/AzToolsFramework/AzToolsFramework/API/EditorPythonRunnerRequestsBus.h
  22. + 3 - 11   Gems/EditorPythonBindings/Code/Source/PythonSystemComponent.cpp
  23. + 1 - 1    Gems/EditorPythonBindings/Code/Source/PythonSystemComponent.h
  24. + 2 - 0    Registry/application_options.setreg
  25. + 2 - 1    Templates/DefaultProject/Template/.gitignore
  26. + 43 - 0   Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py
  27. + 10 - 1   Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py
  28. + 7 - 7    Tools/LyTestTools/ly_test_tools/launchers/platforms/base.py
  29. + 3 - 2    Tools/LyTestTools/ly_test_tools/launchers/platforms/win/launcher.py
  30. + 777 - 0  Tools/LyTestTools/ly_test_tools/o3de/editor_test.py
  31. + 144 - 0  Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py
  32. + 2 - 1    Tools/LyTestTools/setup.py

+ 83 - 35
AutomatedTesting/Gem/PythonTests/EditorPythonTestTools/editor_python_test_tools/utils.py

@@ -11,14 +11,16 @@ import math
 import azlmbr
 import azlmbr.legacy.general as general
 import azlmbr.debug
+import json
 
 import traceback
 
+from typing import Callable, Tuple
+
 class FailFast(Exception):
     """
     Raise to stop proceeding through test steps.
     """
-
     pass
 
 
@@ -30,8 +32,8 @@ class TestHelper:
         # general.idle_wait_frames(1)
 
     @staticmethod
-    def open_level(directory, level):
-        # type: (str, ) -> None
+    def open_level(directory : str, level : str):
+        # type: (str, str) -> None
         """
         :param level: the name of the level folder in AutomatedTesting\\Physics\\
 
@@ -51,7 +53,7 @@ class TestHelper:
         general.idle_wait_frames(200)
 
     @staticmethod
-    def enter_game_mode(msgtuple_success_fail):
+    def enter_game_mode(msgtuple_success_fail : Tuple[str, str]):
         # type: (tuple) -> None
         """
         :param msgtuple_success_fail: The tuple with the expected/unexpected messages for entering game mode.
@@ -65,7 +67,7 @@ class TestHelper:
         Report.critical_result(msgtuple_success_fail, general.is_in_game_mode())
 
     @staticmethod
-    def exit_game_mode(msgtuple_success_fail):
+    def exit_game_mode(msgtuple_success_fail : Tuple[str, str]):
         # type: (tuple) -> None
         """
         :param msgtuple_success_fail: The tuple with the expected/unexpected messages for exiting game mode.
@@ -147,84 +149,130 @@ class Timeout:
     def timed_out(self):
         return time.time() > self.die_after
 
-
 class Report:
     _results = []
     _exception = None
 
     @staticmethod
-    def start_test(test_function):
+    def start_test(test_function : Callable):
+        """
+        Runs the test, outputs the report and asserts in case of failure.
+        @param test_function: The test function to execute
+        """
+        Report._results = []
+        Report._exception = None
+        general.test_output(f"Starting test {test_function.__name__}...\n")
         try:
             test_function()
         except Exception as ex:
             Report._exception = traceback.format_exc()
-        Report.report_results(test_function)
+
+        success, report_str = Report.get_report(test_function)
+        # Print on the o3de console, for debugging purposes
+        print(report_str)
+        # Print the report on the piped stdout of the application
+        general.test_output(report_str)
+        assert success, f"Test {test_function.__name__} failed"
 
     @staticmethod
-    def report_results(test_function):
-        success = True 
-        report = f"Report for {test_function.__name__}:\n"
+    def get_report(test_function : Callable) -> (bool, str):
+        """
+        Builds the report string and embedded JSON for the given test function based on the registered results
+        @param test_function: the test function that was executed
+        @return: (success, report_information) tuple
+        """
+        success = True
+        report = f"Test {test_function.__name__} finished.\nReport:\n"
+        # report_dict is a JSON object that can be used to parse test run information from an external runner
+        # The regular report string is intended to be used for manual debugging
+        filename = os.path.splitext(os.path.basename(test_function.__code__.co_filename))[0] 
+        report_dict = {'name' : filename, 'success' : True, 'exception' : None}
         for result in Report._results:
             passed, info = result
             success = success and passed
+            test_result_info = ""
             if passed:
-                report += f"[SUCCESS] {info}\n"
+                test_result_info = f"[SUCCESS] {info}"
             else:
-                report += f"[FAILED ] {info}\n"
+                test_result_info = f"[FAILED ] {info}"
+            report += f"{test_result_info}\n"
         if Report._exception:
-            report += "EXCEPTION raised:\n  %s\n" % Report._exception[:-1].replace("\n", "\n  ")
+            exception_str = Report._exception[:-1].replace("\n", "\n  ")
+            report += "EXCEPTION raised:\n  %s\n" % exception_str
+            report_dict['exception'] = exception_str
             success = False
-        report += "Test result:  "
-        report += "SUCCESS" if success else "FAILURE"
-        print(report)
-        general.report_test_result(success, report)
+        report += "Test result:  " + ("SUCCESS" if success else "FAILURE")
+        report_dict['success'] = success
+        report_dict['output'] = report
+        report_json_str = json.dumps(report_dict)
+        # To help parsing, the JSON will always be contained between JSON_START( and )JSON_END
+        report += f"\nJSON_START({report_json_str})JSON_END\n"
+        return success, report
 
     @staticmethod
-    def info(msg):
+    def info(msg : str):
+        """
+        Outputs information on the editor console for the test
+        @param msg: message to output
+        """
         print("Info: {}".format(msg))
 
     @staticmethod
-    def success(msgtuple_success_fail):
+    def success(msgtuple_success_fail : Tuple[str, str]):
+        """
+        Given a test string tuple (success_string, failure_string), registers the test result as success
+        @param msgtuple_success_fail: Two element tuple of success and failure strings
+        """
         outcome = "Success: {}".format(msgtuple_success_fail[0])
         print(outcome)
         Report._results.append((True, outcome))
 
     @staticmethod
-    def failure(msgtuple_success_fail):
+    def failure(msgtuple_success_fail : Tuple[str, str]):
+        """
+        Given a test string tuple (success_string, failure_string), registers the test result as failed
+        @param msgtuple_success_fail: Two element tuple of success and failure strings
+        """
         outcome = "Failure: {}".format(msgtuple_success_fail[1])
         print(outcome)
         Report._results.append((False, outcome))
 
     @staticmethod
-    def result(msgtuple_success_fail, condition):
-        if not isinstance(condition, bool):
-            raise TypeError("condition argument must be a bool")
+    def result(msgtuple_success_fail : Tuple[str, str], outcome : bool):
+        """
+        Given a test string tuple (success_string, failure_string), registers the test result based on the
+        given outcome
+        @param msgtuple_success_fail: Two element tuple of success and failure strings
+        @param outcome: True or False if the result has been a success or failure
+        """
+        if not isinstance(outcome, bool):
+            raise TypeError("outcome argument must be a bool")
 
-        if condition:
+        if outcome:
             Report.success(msgtuple_success_fail)
         else:
             Report.failure(msgtuple_success_fail)
-        return condition
+        return outcome
 
     @staticmethod
-    def critical_result(msgtuple_success_fail, condition, fast_fail_message=None):
+    def critical_result(msgtuple_success_fail : Tuple[str, str], outcome : bool, fast_fail_message : str = None):
         # type: (tuple, bool, str) -> None
         """
-        if condition is False we will fail fast
+        if outcome is False we will fail fast
 
-        :param msgtuple_success_fail: messages to print based on the condition
-        :param condition: success (True) or failure (False)
+        :param msgtuple_success_fail: messages to print based on the outcome
+        :param outcome: success (True) or failure (False)
         :param fast_fail_message: [optional] message to include on fast fail
         """
-        if not isinstance(condition, bool):
-            raise TypeError("condition argument must be a bool")
+        if not isinstance(outcome, bool):
+            raise TypeError("outcome argument must be a bool")
 
-        if not Report.result(msgtuple_success_fail, condition):
+        if not Report.result(msgtuple_success_fail, outcome):
             TestHelper.fail_fast(fast_fail_message)
 
     # DEPRECATED: Use vector3_str()
     @staticmethod
-    def info_vector3(vector3, label="", magnitude=None):
+    def info_vector3(vector3 : azlmbr.math.Vector3, label : str ="", magnitude : float =None):
         # type: (azlmbr.math.Vector3, str, float) -> None
         """
         prints the vector to the Report.info log. If applied, label will print first,
@@ -390,4 +438,4 @@ def vector3_str(vector3):
     return "(x: {:.2f}, y: {:.2f}, z: {:.2f})".format(vector3.x, vector3.y, vector3.z)
     
 def aabb_str(aabb):
-    return "[Min: %s, Max: %s]" % (vector3_str(aabb.min), vector3_str(aabb.max))
+    return "[Min: %s, Max: %s]" % (vector3_str(aabb.min), vector3_str(aabb.max))

+ 14 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Crashes.py

@@ -0,0 +1,14 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+def EditorTest_That_Crashes():
+    import azlmbr.legacy.general as general
+    general.crash()
+
+if __name__ == "__main__":
+    from editor_python_test_tools.utils import Report
+    Report.start_test(EditorTest_That_Crashes)

+ 13 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Fails.py

@@ -0,0 +1,13 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+def EditorTest_That_Fails():
+    assert False, "This test fails on purpose to test functionality"
+
+if __name__ == "__main__":
+    from editor_python_test_tools.utils import Report
+    Report.start_test(EditorTest_That_Fails)

+ 15 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_Passes.py

@@ -0,0 +1,15 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import time
+
+def EditorTest_That_Passes():
+    pass
+
+if __name__ == "__main__":
+    from editor_python_test_tools.utils import Report
+    Report.start_test(EditorTest_That_Passes)

+ 15 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/EditorTest_That_PassesToo.py

@@ -0,0 +1,15 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import time
+
+def EditorTest_That_PassesToo():
+    pass
+
+if __name__ == "__main__":
+    from editor_python_test_tools.utils import Report
+    Report.start_test(EditorTest_That_PassesToo)

+ 262 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/TestSuite_Main.py

@@ -0,0 +1,262 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+"""
+This suite contains the tests for editor_test utilities.
+"""
+
+import pytest
+import os
+import sys
+import importlib
+import re
+
+from ly_test_tools import LAUNCHERS
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite, Result
+from ly_test_tools.o3de.asset_processor import AssetProcessor
+import ly_test_tools.environment.process_utils as process_utils
+
+import argparse, sys
+
[email protected]_main
[email protected]("launcher_platform", ['windows_editor'])
[email protected]("project", ["AutomatedTesting"])
+class TestEditorTest:
+    
+    args = None 
+    path = None
+    @classmethod
+    def setup_class(cls):
+        TestEditorTest.args = sys.argv.copy()
+        build_dir_arg_index = TestEditorTest.args.index("--build-directory")
+        if build_dir_arg_index < 0:
+            print("Error: Must pass --build-directory argument in order to run this test")
+            sys.exit(-2)
+
+        TestEditorTest.args[build_dir_arg_index+1] = os.path.abspath(TestEditorTest.args[build_dir_arg_index+1])
+        TestEditorTest.args.append("-s")
+        TestEditorTest.path = os.path.dirname(os.path.abspath(__file__))
+        cls._asset_processor = None
+
+    def teardown_class(cls):
+        if cls._asset_processor:
+            cls._asset_processor.stop(1)
+            cls._asset_processor.teardown()
+
+    # Test runs #
+    @classmethod
+    def _run_single_test(cls, testdir, workspace, module_name):
+        if cls._asset_processor is None:
+            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
+                cls._asset_processor = AssetProcessor(workspace)
+                cls._asset_processor.start()
+
+        testdir.makepyfile(
+            f"""
+            import pytest
+            import os
+            import sys
+
+            from ly_test_tools import LAUNCHERS
+            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite
+
+            @pytest.mark.SUITE_main
+            @pytest.mark.parametrize("launcher_platform", ['windows_editor'])
+            @pytest.mark.parametrize("project", ["AutomatedTesting"])
+            class TestAutomation(EditorTestSuite):
+                class test_single(EditorSingleTest):
+                    import {module_name} as test_module
+
+            """)
+        result = testdir.runpytest(*TestEditorTest.args[2:])
+
+        def get_class(module_name):
+            class test_single(EditorSingleTest):
+                test_module = importlib.import_module(module_name)
+            return test_single
+
+        output = "".join(result.outlines)
+        extracted_results = EditorTestSuite._get_results_using_output([get_class(module_name)], output, output)
+        extracted_result = next(iter(extracted_results.items()))
+        return (extracted_result[1], result)
+    
+    def test_single_passing_test(self, request, workspace, launcher_platform, testdir):
+        (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Passes")
+        result.assert_outcomes(passed=1)
+        assert isinstance(extracted_result, Result.Pass)
+
+    def test_single_failing_test(self, request, workspace, launcher_platform, testdir):
+        (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Fails")        
+        result.assert_outcomes(failed=1)
+        assert isinstance(extracted_result, Result.Fail)
+
+    def test_single_crashing_test(self, request, workspace, launcher_platform, testdir):
+        (extracted_result, result) = TestEditorTest._run_single_test(testdir, workspace, "EditorTest_That_Crashes")
+        result.assert_outcomes(failed=1)
+        assert isinstance(extracted_result, Result.Unknown)
+    
+    @classmethod
+    def _run_shared_test(cls, testdir, module_class_code, extra_cmd_line=None):
+        if not extra_cmd_line:
+            extra_cmd_line = []
+
+        if cls._asset_processor is None:
+            if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
+                cls._asset_processor = AssetProcessor(workspace)
+                cls._asset_processor.start()
+
+        testdir.makepyfile(
+            f"""
+            import pytest
+            import os
+            import sys
+
+            from ly_test_tools import LAUNCHERS
+            from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorTestSuite
+
+            @pytest.mark.SUITE_main
+            @pytest.mark.parametrize("launcher_platform", ['windows_editor'])
+            @pytest.mark.parametrize("project", ["AutomatedTesting"])
+            class TestAutomation(EditorTestSuite):
+            {module_class_code}
+            """)
+        result = testdir.runpytest(*TestEditorTest.args[2:] + extra_cmd_line)
+        return result
+
+    def test_batched_two_passing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                    is_parallelizable = False
+                
+                class test_2(EditorSharedTest):
+                    import EditorTest_That_PassesToo as test_module
+                    is_parallelizable = False
+            """
+        )
+        # 2 Passes +1(batch runner)
+        result.assert_outcomes(passed=3)
+
+    def test_batched_one_pass_one_fail(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                    is_parallelizable = False
+                
+                class test_fail(EditorSharedTest):
+                    import EditorTest_That_Fails as test_module
+                    is_parallelizable = False                
+            """
+        )
+        # 1 Fail, 1 Passes +1(batch runner)
+        result.assert_outcomes(passed=2, failed=1)
+    
+    def test_batched_one_pass_one_fail_one_crash(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                    is_parallelizable = False
+                
+                class test_fail(EditorSharedTest):
+                    import EditorTest_That_Fails as test_module
+                    is_parallelizable = False                
+                
+                class test_crash(EditorSharedTest):
+                    import EditorTest_That_Crashes as test_module
+                    is_parallelizable = False   
+            """
+        )
+        # 2 Fail, 1 Passes + 1(batch runner)
+        result.assert_outcomes(passed=2, failed=2)
+
+    def test_parallel_two_passing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass_1(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                    is_batchable = False
+                
+                class test_pass_2(EditorSharedTest):
+                    import EditorTest_That_PassesToo as test_module
+                    is_batchable = False
+            """
+        )
+        # 2 Passes +1(parallel runner)
+        result.assert_outcomes(passed=3)
+    
+    def test_parallel_one_passing_one_failing_one_crashing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                    is_batchable = False
+                
+                class test_fail(EditorSharedTest):
+                    import EditorTest_That_Fails as test_module
+                    is_batchable = False                
+                
+                class test_crash(EditorSharedTest):
+                    import EditorTest_That_Crashes as test_module
+                    is_batchable = False   
+            """
+        )
+        # 2 Fail, 1 Passes + 1(parallel runner)
+        result.assert_outcomes(passed=2, failed=2)
+
+    def test_parallel_batched_two_passing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass_1(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                
+                class test_pass_2(EditorSharedTest):
+                    import EditorTest_That_PassesToo as test_module
+            """
+        )
+        # 2 Passes +1(batched+parallel runner)
+        result.assert_outcomes(passed=3)
+    
+    def test_parallel_batched_one_passing_one_failing_one_crashing(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                
+                class test_fail(EditorSharedTest):
+                    import EditorTest_That_Fails as test_module
+                
+                class test_crash(EditorSharedTest):
+                    import EditorTest_That_Crashes as test_module
+            """
+        )
+        # 2 Fail, 1 Passes + 1(batched+parallel runner)
+        result.assert_outcomes(passed=2, failed=2)
+
+    def test_selection_2_deselected_1_selected(self, request, workspace, launcher_platform, testdir):
+        result = self._run_shared_test(testdir,
+            """
+                class test_pass(EditorSharedTest):
+                    import EditorTest_That_Passes as test_module
+                
+                class test_fail(EditorSharedTest):
+                    import EditorTest_That_Fails as test_module
+                
+                class test_crash(EditorSharedTest):
+                    import EditorTest_That_Crashes as test_module
+            """, extra_cmd_line=["-k", "fail"]
+        )
+        # 1 Fail + 1 Success(parallel runner)
+        result.assert_outcomes(failed=1, passed=1)
+        outcomes = result.parseoutcomes()
+        deselected = outcomes.get("deselected")
+        assert deselected == 2

+ 6 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/__init__.py

@@ -0,0 +1,6 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""

+ 8 - 0
AutomatedTesting/Gem/PythonTests/editor_test_testing/conftest.py

@@ -0,0 +1,8 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+pytest_plugins = ["pytester"]

+ 0 - 2
AutomatedTesting/Gem/PythonTests/physics/C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC.py

@@ -8,7 +8,6 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
 # Test case ID : C111111
 # Test Case Title : Check that Gravity works
 
-
 # fmt:off
 class Tests:
     enter_game_mode          = ("Entered game mode",        "Failed to enter game mode")
@@ -84,7 +83,6 @@ def C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC():
     # 7) Exit game mode
     helper.exit_game_mode(Tests.exit_game_mode)
 
-
 if __name__ == "__main__":
     import ImportPathHelper as imports
     imports.init()

+ 4 - 2
AutomatedTesting/Gem/PythonTests/physics/C12712453_ScriptCanvas_MultipleRaycastNode.py

@@ -69,6 +69,9 @@ def C12712453_ScriptCanvas_MultipleRaycastNode():
 
     :return: None
     """
+    
+    # Disabled until Script Canvas merges the new backend
+    return
 
     import os
     import sys
@@ -200,5 +203,4 @@ if __name__ == "__main__":
     imports.init()
 
     from editor_python_test_tools.utils import Report
-    # Disabled until Script Canvas merges the new backend
-    #Report.start_test(C12712453_ScriptCanvas_MultipleRaycastNode)
+    Report.start_test(C12712453_ScriptCanvas_MultipleRaycastNode)

+ 6 - 2
AutomatedTesting/Gem/PythonTests/physics/C17411467_AddPhysxRagdollComponent.py

@@ -20,7 +20,7 @@ class Tests():
 # fmt: on
 
 
-def run():
+def C17411467_AddPhysxRagdollComponent():
     """
     Summary:
     Load level with Entity having Actor, AnimGraph and PhysX Ragdoll components.
@@ -93,4 +93,8 @@ def run():
 
 
 if __name__ == "__main__":
-    run()
+    import ImportPathHelper as imports
+    imports.init()
+
+    from editor_python_test_tools.utils import Report
+    Report.start_test(C17411467_AddPhysxRagdollComponent)

+ 0 - 1
AutomatedTesting/Gem/PythonTests/physics/C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain.py

@@ -133,7 +133,6 @@ def C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain():
     helper.exit_game_mode(Tests.exit_game_mode)
 
 
-
 if __name__ == "__main__":
     import ImportPathHelper as imports
     imports.init()

+ 3 - 1
AutomatedTesting/Gem/PythonTests/physics/C4982803_Enable_PxMesh_Option.py

@@ -22,6 +22,7 @@ class Tests():
     add_physx_shape_collider = ("Added PhysX Shape Collider",          "Failed to add PhysX Shape Collider")
     add_box_shape            = ("Added Box Shape",                     "Failed to add Box Shape")
     enter_game_mode          = ("Entered game mode",                   "Failed to enter game mode")
+    exit_game_mode           = ("Exited game mode",                    "Failed to exit game mode")
     test_collision           = ("Entity collided with terrain",        "Failed to collide with terrain")
 # fmt: on
 
@@ -123,7 +124,6 @@ def C4982803_Enable_PxMesh_Option():
         touched_ground = False
 
     terrain_id = general.find_game_entity("Terrain")
-
     def on_collision_begin(args):
         other_id = args[0]
         if other_id.Equal(terrain_id):
@@ -137,6 +137,8 @@ def C4982803_Enable_PxMesh_Option():
     helper.wait_for_condition(lambda: Collider.touched_ground, TIMEOUT)
     Report.result(Tests.test_collision, Collider.touched_ground)
 
+    # 8) Exit game mode
+    helper.exit_game_mode(Tests.exit_game_mode)
 
 if __name__ == "__main__":
     import ImportPathHelper as imports

+ 105 - 0
AutomatedTesting/Gem/PythonTests/physics/TestSuite_Main_Test.py

@@ -0,0 +1,105 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import pytest
+import os
+import sys
+import inspect
+
+from ly_test_tools import LAUNCHERS
+from ly_test_tools.o3de.editor_test import EditorSingleTest, EditorSharedTest, EditorParallelTest, EditorTestSuite
+from .FileManagement import FileManagement as fm
+
+# Custom test spec, it provides functionality to override files
+class EditorSingleTest_WithFileOverrides(EditorSingleTest):
+    # Specify here what files to override, [(original, override), ...]
+    files_to_override = [()]
+    # Base directory of the files (Default path is {ProjectName})
+    base_dir = None
+    # True will search sub-directories for the files in base_dir
+    search_subdirs = False
+
+    @classmethod
+    def wrap_run(cls, instance, request, workspace, editor, editor_test_results, launcher_platform):
+        root_path = cls.base_dir
+        if root_path is not None:
+            root_path = os.path.join(workspace.paths.engine_root(), root_path)
+        else:
+            # Default to project folder
+            root_path = workspace.paths.project()
+
+        # Try to locate both target and source files
+        original_file_list, override_file_list = zip(*cls.files_to_override)
+        try:
+            file_list = fm._find_files(original_file_list + override_file_list, root_path, cls.search_subdirs)
+        except RuntimeWarning as w:
+            assert False, (
+                w.message
+                + " Please check use of search_subdirs; make sure you are using the correct parent directory."
+            )
+
+        for f in original_file_list:
+            fm._restore_file(f, file_list[f])
+            fm._backup_file(f, file_list[f])
+        
+        for original, override in cls.files_to_override:
+            fm._copy_file(override, file_list[override], original, file_list[override])
+
+        yield # Run Test
+        for f in original_file_list:
+            fm._restore_file(f, file_list[f])
+
+
[email protected]_main
[email protected]("launcher_platform", ['windows_editor'])
[email protected]("project", ["AutomatedTesting"])
+class TestAutomation(EditorTestSuite):
+    
+    class C4044459_Material_DynamicFriction(EditorSingleTest_WithFileOverrides):
+        from . import C4044459_Material_DynamicFriction as test_module        
+        files_to_override = [
+            ('physxsystemconfiguration.setreg', 'C4044459_Material_DynamicFriction.setreg_override')
+        ]
+        base_dir = "AutomatedTesting/Registry"
+
+    class C4982593_PhysXCollider_CollisionLayerTest(EditorSingleTest_WithFileOverrides):
+        from . import C4982593_PhysXCollider_CollisionLayerTest as test_module
+        files_to_override = [
+            ('physxsystemconfiguration.setreg', 'C4982593_PhysXCollider_CollisionLayer.setreg_override')
+        ]
+        base_dir = "AutomatedTesting/Registry"
+
+    class C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC(EditorSharedTest):
+        from . import C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC as test_module
+
+    class C5932041_PhysXForceRegion_LocalSpaceForceOnRigidBodies(EditorSharedTest):
+        from . import C5932041_PhysXForceRegion_LocalSpaceForceOnRigidBodies as test_module
+
+    class C15425929_Undo_Redo(EditorSharedTest):
+        from . import C15425929_Undo_Redo as test_module
+        
+    class C4976243_Collision_SameCollisionGroupDiffCollisionLayers(EditorSharedTest):
+        from . import C4976243_Collision_SameCollisionGroupDiffCollisionLayers as test_module
+
+    class C14654881_CharacterController_SwitchLevels(EditorSharedTest):
+        from . import C14654881_CharacterController_SwitchLevels as test_module
+ 
+    class C17411467_AddPhysxRagdollComponent(EditorSharedTest):
+        from . import C17411467_AddPhysxRagdollComponent as test_module
+ 
+    class C12712453_ScriptCanvas_MultipleRaycastNode(EditorSharedTest):
+        from . import C12712453_ScriptCanvas_MultipleRaycastNode as test_module
+
+    class C18243586_Joints_HingeLeadFollowerCollide(EditorSharedTest):
+        from . import C18243586_Joints_HingeLeadFollowerCollide as test_module
+
+    class C4982803_Enable_PxMesh_Option(EditorSharedTest):
+        from . import C4982803_Enable_PxMesh_Option as test_module
+    
+    class C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain(EditorSharedTest):
+        from . import C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain as test_module
+    

+ 5 - 4
AutomatedTesting/Gem/PythonTests/prefab/PrefabLevel_OpensLevelWithEntities.py

@@ -46,19 +46,20 @@ def PrefabLevel_OpensLevelWithEntities():
         if entityIds[0].IsValid():
             return entityIds[0]
         return None
-#Checks for an entity called "EmptyEntity"
+    
+    # Checks for an entity called "EmptyEntity"
     helper.wait_for_condition(lambda: find_entity("EmptyEntity").IsValid(), 5.0)
     empty_entity_id = find_entity("EmptyEntity")
     Report.result(Tests.find_empty_entity, empty_entity_id.IsValid())
 
-# Checks if the EmptyEntity is in the correct position and if it fails, it will provide the expected postion and the actual postion of the entity in the Editor log
+    # Checks if the EmptyEntity is in the correct position; if not, it will report the expected position and the actual position of the entity in the Editor log
     empty_entity_pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", empty_entity_id)
     is_at_position = empty_entity_pos.IsClose(EXPECTED_EMPTY_ENTITY_POS)
     Report.result(Tests.empty_entity_pos, is_at_position)
     if not is_at_position:
         Report.info(f'Expected position: {EXPECTED_EMPTY_ENTITY_POS.ToString()}, actual position: {empty_entity_pos.ToString()}')
 
-#Checks for an entity called "EntityWithPxCollider" and if it has the PhysX Collider component
+    # Checks for an entity called "EntityWithPxCollider" and if it has the PhysX Collider component
     pxentity = find_entity("EntityWithPxCollider")
     Report.result(Tests.find_pxentity, pxentity.IsValid())
 
@@ -69,4 +70,4 @@ def PrefabLevel_OpensLevelWithEntities():
 if __name__ == "__main__":
 
     from editor_python_test_tools.utils import Report
-    Report.start_test (PrefabLevel_OpensLevelWithEntities)
+    Report.start_test(PrefabLevel_OpensLevelWithEntities)

+ 74 - 14
Code/Editor/CryEdit.cpp

@@ -581,6 +581,8 @@ public:
             {{"project-path", "Supplies the path to the project that the Editor should use", "project-path"}, dummyString},
             {{"engine-path", "Supplies the path to the engine", "engine-path"}, dummyString},
             {{"project-cache-path", "Path to the project cache", "project-cache-path"}, dummyString},
+            {{"project-user-path", "Path to the project user path", "project-user-path"}, dummyString},
+            {{"project-log-path", "Path to the project log path", "project-log-path"}, dummyString}
             // add dummy entries here to prevent QCommandLineParser error-ing out on cmd line args that will be parsed later
         };
 
@@ -1483,7 +1485,6 @@ struct PythonTestOutputHandler final
     {
         PythonOutputHandler::OnExceptionMessage(message);
         printf("EXCEPTION: %.*s\n", static_cast<int>(message.size()), message.data());
-        AZ::Debug::Trace::Terminate(1);
     }
 };
 
@@ -1501,34 +1502,91 @@ void CCryEditApp::RunInitPythonScript(CEditCommandLineInfo& cmdInfo)
     using namespace AzToolsFramework;
     if (cmdInfo.m_bRunPythonScript || cmdInfo.m_bRunPythonTestScript)
     {
+        // cmdInfo data is only available on startup, copy it
+        QByteArray fileStr = cmdInfo.m_strFileName.toUtf8();
+
+        // We support specifying multiple files in the cmdline by separating them with ';'
+        AZStd::vector<AZStd::string_view> fileList;
+        AzFramework::StringFunc::TokenizeVisitor(
+            fileStr.constData(),
+            [&fileList](AZStd::string_view elem)
+            {
+                fileList.push_back(elem);
+            }, ';', false /* keepEmptyStrings */
+        );
+
         if (cmdInfo.m_pythonArgs.length() > 0 || cmdInfo.m_bRunPythonTestScript)
         {
-            AZStd::vector<AZStd::string> tokens;
-            AzFramework::StringFunc::Tokenize(cmdInfo.m_pythonArgs.toUtf8().constData(), tokens, ' ');
+            QByteArray pythonArgsStr = cmdInfo.m_pythonArgs.toUtf8();
             AZStd::vector<AZStd::string_view> pythonArgs;
-            std::transform(tokens.begin(), tokens.end(), std::back_inserter(pythonArgs), [](auto& tokenData) { return tokenData.c_str(); });
+            AzFramework::StringFunc::TokenizeVisitor(pythonArgsStr.constData(),
+                [&pythonArgs](AZStd::string_view elem)
+                {
+                    pythonArgs.push_back(elem);
+                }, ' '
+            );
+
             if (cmdInfo.m_bRunPythonTestScript)
             {
-                AZStd::string pythonTestCase;
-                if (!cmdInfo.m_pythontTestCase.isEmpty())
+                // Multiple test cases can be specified with ';'; these should match the files to run
+                AZStd::vector<AZStd::string_view> testcaseList;
+                testcaseList.resize(fileList.size());
                 {
-                    pythonTestCase = cmdInfo.m_pythontTestCase.toUtf8().constData();
+                    int i = 0;
+                    AzFramework::StringFunc::TokenizeVisitor(
+                        fileStr.constData(),
+                        [&i, &testcaseList](AZStd::string_view elem)
+                        {
+                            testcaseList[i++] = (elem);
+                        }, ';', false /* keepEmptyStrings */
+                    );
                 }
 
-                EditorPythonRunnerRequestBus::Broadcast(&EditorPythonRunnerRequestBus::Events::ExecuteByFilenameAsTest, cmdInfo.m_strFileName.toUtf8().constData(), pythonTestCase, pythonArgs);
+                bool success = true;
+                auto ExecuteByFilenamesTests = [&pythonArgs, &fileList, &testcaseList, &success](EditorPythonRunnerRequests* pythonRunnerRequests)
+                {
+                    for (int i = 0; i < fileList.size(); ++i)
+                    {
+                        bool cur_success = pythonRunnerRequests->ExecuteByFilenameAsTest(fileList[i], testcaseList[i], pythonArgs);
+                        success = success && cur_success;
+                    }
+                };
+                EditorPythonRunnerRequestBus::Broadcast(ExecuteByFilenamesTests);
 
-                // Close the editor gracefully as the test has completed
-                GetIEditor()->GetDocument()->SetModifiedFlag(false);
-                QTimer::singleShot(0, qApp, &QApplication::closeAllWindows);
+                if (success)
+                {
+                    // Close the editor gracefully as the test has completed
+                    GetIEditor()->GetDocument()->SetModifiedFlag(false);
+                    QTimer::singleShot(0, qApp, &QApplication::closeAllWindows);
+                }
+                else
+                {
+                    // Close down the application with 0xF exit code indicating failure of the test
+                    AZ::Debug::Trace::Terminate(0xF);
+                }
             }
             else
             {
-                EditorPythonRunnerRequestBus::Broadcast(&EditorPythonRunnerRequestBus::Events::ExecuteByFilenameWithArgs, cmdInfo.m_strFileName.toUtf8().constData(), pythonArgs);
+                auto ExecuteByFilenamesWithArgs = [&pythonArgs, &fileList](EditorPythonRunnerRequests* pythonRunnerRequests)
+                {
+                    for (AZStd::string_view filename : fileList)
+                    {
+                        pythonRunnerRequests->ExecuteByFilenameWithArgs(filename, pythonArgs);
+                    }
+                };
+                EditorPythonRunnerRequestBus::Broadcast(ExecuteByFilenamesWithArgs);
             }
         }
         else
         {
-            EditorPythonRunnerRequestBus::Broadcast(&EditorPythonRunnerRequestBus::Events::ExecuteByFilename, cmdInfo.m_strFileName.toUtf8().constData());
+            auto ExecuteByFilenames = [&fileList](EditorPythonRunnerRequests* pythonRunnerRequests)
+            {
+                for (AZStd::string_view filename : fileList)
+                {
+                    pythonRunnerRequests->ExecuteByFilename(filename);
+                }
+            };
+            EditorPythonRunnerRequestBus::Broadcast(ExecuteByFilenames);
         }
     }
 }
@@ -2271,7 +2329,9 @@ int CCryEditApp::IdleProcessing(bool bBackgroundUpdate)
     bool bIsAppWindow = IsWindowInForeground();
     bool bActive = false;
     int res = 0;
-    if (bIsAppWindow || m_bForceProcessIdle || m_bKeepEditorActive)
+    if (bIsAppWindow || m_bForceProcessIdle || m_bKeepEditorActive
+        // Automated tests must always keep the editor active, or they can get stuck
+        || m_bAutotestMode)
     {
         res = 1;
         bActive = true;

+ 2 - 7
Code/Editor/MainWindow.cpp

@@ -278,14 +278,9 @@ namespace
         PyExit();
     }
 
-    void PyReportTest(bool success, const AZStd::string& output)
+    void PyTestOutput(const AZStd::string& output)
     {
         CCryEditApp::instance()->PrintAlways(output);
-        if (!success)
-        {
-            gEnv->retCode = 0xF; // Special error code indicating a failure in tests
-        }
-        PyExitNoPrompt();
     }
 }
 
@@ -1956,7 +1951,7 @@ namespace AzToolsFramework
             addLegacyGeneral(behaviorContext->Method("get_pane_class_names", PyGetViewPaneNames, nullptr, "Get all available class names for use with open_pane & close_pane."));
             addLegacyGeneral(behaviorContext->Method("exit", PyExit, nullptr, "Exits the editor."));
             addLegacyGeneral(behaviorContext->Method("exit_no_prompt", PyExitNoPrompt, nullptr, "Exits the editor without prompting to save first."));
-            addLegacyGeneral(behaviorContext->Method("report_test_result", PyReportTest, nullptr, "Report test information."));
+            addLegacyGeneral(behaviorContext->Method("test_output", PyTestOutput, nullptr, "Report test information."));
         }
     }
 }

+ 25 - 3
Code/Framework/AzCore/AzCore/Settings/SettingsRegistryMergeUtils.cpp

@@ -561,9 +561,24 @@ namespace AZ::SettingsRegistryMergeUtils
             AZ::IO::FixedMaxPath normalizedProjectPath = path.LexicallyNormal();
             registry.Set(FilePathKey_ProjectPath, normalizedProjectPath.Native());
 
-            // Add an alias to the project "user" directory
-            AZ::IO::FixedMaxPath projectUserPath = (normalizedProjectPath / "user").LexicallyNormal();
+            // Set the user directory with the provided path or using project/user as default
+            auto projectUserPathKey = FixedValueString::format("%s/project_user_path", BootstrapSettingsRootKey);
+            AZ::IO::FixedMaxPath projectUserPath;
+            if (!registry.Get(projectUserPath.Native(), projectUserPathKey))
+            {
+                projectUserPath = (normalizedProjectPath / "user").LexicallyNormal();
+            }
             registry.Set(FilePathKey_ProjectUserPath, projectUserPath.Native());
+
+            // Set the log directory with the provided path or using <project_user_path>/log as default
+            auto projectLogPathKey = FixedValueString::format("%s/project_log_path", BootstrapSettingsRootKey);
+            AZ::IO::FixedMaxPath projectLogPath;
+            if (!registry.Get(projectLogPath.Native(), projectLogPathKey))
+            {
+                projectLogPath = (projectUserPath / "log").LexicallyNormal();
+            }
+            registry.Set(FilePathKey_ProjectLogPath, projectLogPath.Native());
+
             // check for a default write storage path, fall back to the project's user/ directory if not
             AZStd::optional<AZ::IO::FixedMaxPathString> devWriteStorage = Utils::GetDevWriteStoragePath();
             registry.Set(FilePathKey_DevWriteStorage, devWriteStorage.has_value()
@@ -948,7 +963,14 @@ namespace AZ::SettingsRegistryMergeUtils
             OptionKeyToRegsetKey{
                 "project-cache-path",
                 AZStd::string::format("%s/project_cache_path", AZ::SettingsRegistryMergeUtils::BootstrapSettingsRootKey)},
-            OptionKeyToRegsetKey{"project-build-path", ProjectBuildPath} };
+            OptionKeyToRegsetKey{
+                "project-user-path",
+                AZStd::string::format("%s/project_user_path", AZ::SettingsRegistryMergeUtils::BootstrapSettingsRootKey)},
+            OptionKeyToRegsetKey{
+                "project-log-path",
+                AZStd::string::format("%s/project_log_path", AZ::SettingsRegistryMergeUtils::BootstrapSettingsRootKey)},
+            OptionKeyToRegsetKey{"project-build-path", ProjectBuildPath},
+        };
 
         AZStd::fixed_vector<AZStd::string, commandOptions.size()> overrideArgs;
 

+ 4 - 0
Code/Framework/AzCore/AzCore/Settings/SettingsRegistryMergeUtils.h

@@ -44,6 +44,10 @@ namespace AZ::SettingsRegistryMergeUtils
     //! project settings can be stored
     inline static constexpr char FilePathKey_ProjectUserPath[] = "/Amazon/AzCore/Runtime/FilePaths/SourceProjectUserPath";
 
+    //! Store the absolute path to the Project's "log" directory, which is a transient directory where per-user
+    //! logs can be stored. By default this would be "{FilePathKey_ProjectUserPath}/log"
+    inline static constexpr char FilePathKey_ProjectLogPath[] = "/Amazon/AzCore/Runtime/FilePaths/SourceProjectLogPath";
+
     //! User facing key which represents the root of a project cmake build tree. i.e the ${CMAKE_BINARY_DIR}
     //! A relative path is taking relative to the *project* root, NOT *engine* root.
     inline constexpr AZStd::string_view ProjectBuildPath = "/Amazon/Project/Settings/Build/project_build_path";

+ 13 - 15
Code/Framework/AzFramework/AzFramework/Application/Application.cpp

@@ -707,25 +707,23 @@ namespace AzFramework
                 }
             }
 
-            if (AZ::IO::FixedMaxPath projectUserPath;
-                m_settingsRegistry->Get(projectUserPath.Native(), AZ::SettingsRegistryMergeUtils::FilePathKey_ProjectUserPath))
+            AZ::IO::FixedMaxPath engineRoot = GetEngineRoot();
+            AZ::IO::FixedMaxPath projectUserPath;
+            if (!m_settingsRegistry->Get(projectUserPath.Native(), AZ::SettingsRegistryMergeUtils::FilePathKey_ProjectUserPath))
             {
-                fileIoBase->SetAlias("@user@", projectUserPath.c_str());
-                AZ::IO::FixedMaxPath projectLogPath = projectUserPath / "log";
-                fileIoBase->SetAlias("@log@", projectLogPath.c_str());
-                fileIoBase->CreatePath(projectLogPath.c_str()); // Create the log directory at this point
-
-                CreateUserCache(projectUserPath, *fileIoBase);
+                projectUserPath = engineRoot / "user";
             }
-            else
+            fileIoBase->SetAlias("@user@", projectUserPath.c_str());
+            fileIoBase->CreatePath(projectUserPath.c_str());
+            CreateUserCache(projectUserPath, *fileIoBase);
+
+            AZ::IO::FixedMaxPath projectLogPath;
+            if (!m_settingsRegistry->Get(projectLogPath.Native(), AZ::SettingsRegistryMergeUtils::FilePathKey_ProjectLogPath))
             {
-                AZ::IO::FixedMaxPath fallbackLogPath = GetEngineRoot();
-                fallbackLogPath /= "user";
-                fileIoBase->SetAlias("@user@", fallbackLogPath.c_str());
-                fallbackLogPath /= "log";
-                fileIoBase->SetAlias("@log@", fallbackLogPath.c_str());
-                fileIoBase->CreatePath(fallbackLogPath.c_str());
+                projectLogPath = projectUserPath / "log";
             }
+            fileIoBase->SetAlias("@log@", projectLogPath.c_str());
+            fileIoBase->CreatePath(projectLogPath.c_str());
         }
     }
 

+ 2 - 1
Code/Framework/AzToolsFramework/AzToolsFramework/API/EditorPythonRunnerRequestsBus.h

@@ -35,11 +35,12 @@ namespace AzToolsFramework
             [[maybe_unused]] AZStd::string_view filename, [[maybe_unused]] const AZStd::vector<AZStd::string_view>& args) {}
 
         //! executes a Python script as a test
-        virtual void ExecuteByFilenameAsTest(
+        virtual bool ExecuteByFilenameAsTest(
             [[maybe_unused]] AZStd::string_view filename,
             [[maybe_unused]] AZStd::string_view testCase,
             [[maybe_unused]] const AZStd::vector<AZStd::string_view>& args)
         {
+            return false;
         }
     };
     using EditorPythonRunnerRequestBus = AZ::EBus<EditorPythonRunnerRequests>;

+ 3 - 11
Gems/EditorPythonBindings/Code/Source/PythonSystemComponent.cpp

@@ -650,21 +650,13 @@ namespace EditorPythonBindings
         ExecuteByFilenameWithArgs(filename, args);
     }
 
-    void PythonSystemComponent::ExecuteByFilenameAsTest(AZStd::string_view filename, AZStd::string_view testCase, const AZStd::vector<AZStd::string_view>& args)
+    bool PythonSystemComponent::ExecuteByFilenameAsTest(AZStd::string_view filename, AZStd::string_view testCase, const AZStd::vector<AZStd::string_view>& args)
     {
+        AZ_TracePrintf("python", "Running automated test: %.*s (testcase %.*s)", AZ_STRING_ARG(filename), AZ_STRING_ARG(testCase))
         AzToolsFramework::EditorPythonScriptNotificationsBus::Broadcast(
             &AzToolsFramework::EditorPythonScriptNotificationsBus::Events::OnStartExecuteByFilenameAsTest, filename, testCase, args);
         const Result evalResult = EvaluateFile(filename, args);
-        if (evalResult == Result::Okay)
-        {
-            // all good, the test script will need to exit the application now
-            return;
-        }
-        else
-        {
-            // something went wrong with executing the test script
-            AZ::Debug::Trace::Terminate(0xF);
-        }
+        return evalResult == Result::Okay;
     }
 
     void PythonSystemComponent::ExecuteByFilenameWithArgs(AZStd::string_view filename, const AZStd::vector<AZStd::string_view>& args)

+ 1 - 1
Gems/EditorPythonBindings/Code/Source/PythonSystemComponent.h

@@ -55,7 +55,7 @@ namespace EditorPythonBindings
         void ExecuteByString(AZStd::string_view script, bool printResult) override;
         void ExecuteByFilename(AZStd::string_view filename) override;
         void ExecuteByFilenameWithArgs(AZStd::string_view filename, const AZStd::vector<AZStd::string_view>& args) override;
-        void ExecuteByFilenameAsTest(AZStd::string_view filename, AZStd::string_view testCase, const AZStd::vector<AZStd::string_view>& args) override;
+        bool ExecuteByFilenameAsTest(AZStd::string_view filename, AZStd::string_view testCase, const AZStd::vector<AZStd::string_view>& args) override;
         ////////////////////////////////////////////////////////////////////////
         
     private:

+ 2 - 0
Registry/application_options.setreg

@@ -7,6 +7,8 @@
                     "engine-path",
                     "project-cache-path",
                     "project-build-path",
+                    "project-user-path",
+                    "project-log-path",
                     "regset",
                     "regremove",
                     "regdump",

+ 2 - 1
Templates/DefaultProject/Template/.gitignore

@@ -1,3 +1,4 @@
 [Bb]uild/
 [Cc]ache/
-[Uu]ser/
+[Uu]ser/
+[Uu]ser_test*/

+ 43 - 0
Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py

@@ -0,0 +1,43 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+"""
+Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests.
+"""
+
+import pytest
+import inspect
+
+__test__ = False
+
+def pytest_addoption(parser):
+    parser.addoption("--no-editor-batch", action="store_true", help="Don't batch multiple tests in single editor")
+    parser.addoption("--no-editor-parallel", action="store_true", help="Don't run multiple editors in parallel")
+    parser.addoption("--parallel-editors", type=int, action="store", help="Override the number editors to run at the same time")
+
+# Create a custom item collection if the class defines a pytest_custom_makeitem function
+# This is used for automatically generating test functions with a custom collector
+def pytest_pycollect_makeitem(collector, name, obj):
+    if inspect.isclass(obj):
+        for base in obj.__bases__:
+            if hasattr(base, "pytest_custom_makeitem"):
+                return base.pytest_custom_makeitem(collector, name, obj)
+
+# Add custom modification of items.
+# This is used for adding the runners into the item list
[email protected](hookwrapper=True)
+def pytest_collection_modifyitems(session, items, config):
+    all_classes = set()
+    for item in items:
+        all_classes.add(item.instance.__class__)
+
+    yield
+
+    for cls in all_classes:
+        if hasattr(cls, "pytest_custom_modify_items"):
+            cls.pytest_custom_modify_items(session, items, config)
+                

+ 10 - 1
Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/test_tools_fixtures.py

@@ -46,7 +46,6 @@ def pytest_addoption(parser):
                      help="An existing CMake binary output directory which contains the lumberyard executables,"
                           "such as: D:/ly/dev/windows_vs2017/bin/profile/")
 
-
 def pytest_configure(config):
     """
     Save custom CLI options during Pytest configuration, so they are later accessible without using fixtures
@@ -57,6 +56,13 @@ def pytest_configure(config):
     ly_test_tools._internal.pytest_plugin.output_path = _get_output_path(config)
 
 
+def pytest_pycollect_makeitem(collector, name, obj):
+    import inspect
+    if inspect.isclass(obj):
+        for base in obj.__bases__:
+            if hasattr(base, "pytest_custom_makeitem"):
+                return base.pytest_custom_makeitem(collector, name, obj)
+
 def _get_build_directory(config):
     """
     Fetch and verify the cmake build directory CLI arg, without creating an error when unset
@@ -359,6 +365,9 @@ def _workspace(request,  # type: _pytest.fixtures.SubRequest
                ):
     """Separate implementation to call directly during unit tests"""
 
+    # Convert build directory to absolute path in case it was provided as relative path
+    build_directory = os.path.abspath(build_directory)
+
     workspace = helpers.create_builtin_workspace(
         build_directory=build_directory,
         project=project,

+ 7 - 7
Tools/LyTestTools/ly_test_tools/launchers/platforms/base.py

@@ -66,7 +66,7 @@ class Launcher(object):
 
         return config_dict
 
-    def setup(self, backupFiles=True, launch_ap=True):
+    def setup(self, backupFiles=True, launch_ap=True, configure_settings=True):
         """
         Perform setup of this launcher, must be called before launching.
         Subclasses should call its parent's setup() before calling its own code, unless it changes configuration files
@@ -180,7 +180,7 @@ class Launcher(object):
         """
         raise NotImplementedError("There is no binary file for this launcher")
 
-    def start(self, backupFiles=True, launch_ap=None):
+    def start(self, backupFiles=True, launch_ap=None, configure_settings=True):
         """
         Automatically prepare and launch the application
         When called using "with launcher.start():" it will automatically call stop() when block exits
@@ -188,16 +188,16 @@ class Launcher(object):
 
         :return: Application wrapper for context management, not intended to be called directly
         """
-        return _Application(self, backupFiles, launch_ap=launch_ap)
+        return _Application(self, backupFiles, launch_ap=launch_ap, configure_settings=configure_settings)
 
-    def _start_impl(self, backupFiles = True, launch_ap=None):
+    def _start_impl(self, backupFiles = True, launch_ap=None, configure_settings=True):
         """
         Implementation of start(), intended to be called via context manager in _Application
 
         :param backupFiles: Bool to backup settings files
         :return None:
         """
-        self.setup(backupFiles=backupFiles, launch_ap=launch_ap)
+        self.setup(backupFiles=backupFiles, launch_ap=launch_ap, configure_settings=configure_settings)
         self.launch()
 
     def stop(self):
@@ -313,7 +313,7 @@ class _Application(object):
     """
     Context-manager for opening an application, enables using both "launcher.start()" and "with launcher.start()"
     """
-    def __init__(self, launcher, backupFiles = True, launch_ap=None):
+    def __init__(self, launcher, backupFiles = True, launch_ap=None, configure_settings=True):
         """
         Called during both "launcher.start()" and "with launcher.start()"
 
@@ -321,7 +321,7 @@ class _Application(object):
         :return None:
         """
         self.launcher = launcher
-        launcher._start_impl(backupFiles, launch_ap)
+        launcher._start_impl(backupFiles, launch_ap, configure_settings)
 
     def __enter__(self):
         """

+ 3 - 2
Tools/LyTestTools/ly_test_tools/launchers/platforms/win/launcher.py

@@ -38,7 +38,7 @@ class WinLauncher(Launcher):
         assert self.workspace.project is not None
         return os.path.join(self.workspace.paths.build_directory(), f"{self.workspace.project}.GameLauncher.exe")
 
-    def setup(self, backupFiles=True, launch_ap=True):
+    def setup(self, backupFiles=True, launch_ap=True, configure_settings=True):
         """
         Perform setup of this launcher, must be called before launching.
         Subclasses should call its parent's setup() before calling its own code, unless it changes configuration files
@@ -56,7 +56,8 @@ class WinLauncher(Launcher):
             launch_ap = True
 
         # Modify and re-configure
-        self.configure_settings()
+        if configure_settings:
+            self.configure_settings()
         super(WinLauncher, self).setup(backupFiles, launch_ap)
 
     def launch(self):

+ 777 - 0
Tools/LyTestTools/ly_test_tools/o3de/editor_test.py

@@ -0,0 +1,777 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import pytest
+import inspect
+from typing import List
+from abc import ABC
+from inspect import getmembers, isclass
+import os, sys
+import threading
+import math
+import json
+import logging
+import types
+import functools
+import re
+
+import ly_test_tools.environment.file_system as file_system
+import ly_test_tools.environment.waiter as waiter
+import ly_test_tools.environment.process_utils as process_utils
+
+from ly_test_tools.o3de.asset_processor import AssetProcessor
+from ly_test_tools.launchers.exceptions import WaitTimeoutError
+from . import editor_test_utils as editor_utils
+
+# This file provides editor testing functionality to easily write automated editor tests for O3DE.
+# To use these utilities, subclass your test suite from EditorTestSuite; this gives an easy way of specifying
+# python test scripts that the editor will run without needing to write any boilerplate code.
+# It supports out-of-the-box parallelization (running multiple editor instances at once), batching (running multiple tests in the same editor instance) and
+# crash detection.
+# Usage example:
+#    class MyTestSuite(EditorTestSuite):
+#   
+#        class MyFirstTest(EditorSingleTest):
+#            from . import script_to_be_run_by_editor as test_module
+#   
+#        class MyTestInParallel_1(EditorParallelTest):
+#            from . import another_script_to_be_run_by_editor as test_module
+#        
+#        class MyTestInParallel_2(EditorParallelTest):
+#            from . import yet_another_script_to_be_run_by_editor as test_module
+#
+#
+# EditorTestSuite introspects the classes defined inside it and automatically prepares the tests, parallelizing/batching them as required
+
+# This file itself contains no tests; since its filename ends with "_test", set __test__ = False so the runner doesn't pick it up
+__test__ = False
+
+# Abstract base class for an editor test.
+class EditorTestBase(ABC):
+    # Maximum time for run, in seconds
+    timeout = 180
+    # Test file that this test will run
+    test_module = None
+
+# Test that will be run alone in one editor
+class EditorSingleTest(EditorTestBase):
+    # Extra cmdline arguments to supply to the editor for the test
+    extra_cmdline_args = []
+
+    # Custom setup function, will run before the test
+    @staticmethod
+    def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
+        pass
+
+    # Custom run wrapping: the code before the yield runs before the test, the code after the yield runs after the test
+    @staticmethod
+    def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
+        yield
+
+    # Custom teardown function, will run after the test    
+    @staticmethod
+    def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
+        pass
+
+# Test that can be run in parallel with other tests and batched with them in a single editor.
+# Does not support per-test setup/teardown, to avoid any possible race conditions
+class EditorSharedTest(EditorTestBase):
+    # Specifies if the test can be batched in the same editor
+    is_batchable = True
+    # Specifies if the test can be run in multiple editors in parallel
+    is_parallelizable = True
+
+# Test that will only be run in parallel editors, not batched with other tests.
+class EditorParallelTest(EditorSharedTest):
+    is_batchable = False
+    is_parallelizable = True
+
+# Test that will be batched along with the other batched tests in the same editor.
+class EditorBatchedTest(EditorSharedTest):
+    is_batchable = True
+    is_parallelizable = False
+
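+# As a rough illustration of the hooks above (module names and the extra flag are placeholders, not part of
+# this change), a suite could combine the different spec types like this:
+#
+#    class MyTestSuite(EditorTestSuite):
+#
+#        class MySingleTest(EditorSingleTest):
+#            from . import some_test_script as test_module
+#            extra_cmdline_args = ["-some_editor_flag"]
+#
+#            @staticmethod
+#            def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
+#                pass  # e.g. prepare data before the editor launches
+#
+#            @staticmethod
+#            def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
+#                # code before the yield runs before the test, code after it runs afterwards
+#                yield
+#
+#        class MyBatchedTest(EditorBatchedTest):
+#            from . import another_test_script as test_module
+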
+class Result:
+    class Base:
+        def get_output_str(self):
+            if hasattr(self, "output") and self.output is not None:
+                return self.output
+            else:
+                return "-- No output --"
+            
+        def get_editor_log_str(self):
+            if hasattr(self, "editor_log") and self.editor_log is not None:
+                return self.editor_log
+            else:
+                return "-- No editor log found --"
+
+    class Pass(Base):
+        @classmethod
+        def create(cls, output : str, editor_log : str):
+            r = cls()
+            r.output = output
+            r.editor_log = editor_log
+            return r
+
+        def __str__(self):
+            output = (
+                f"Test Passed\n"
+                f"------------\n"
+                f"|  Output  |\n"
+                f"------------\n"
+                f"{self.get_output_str()}\n"
+            )
+            return output
+
+    class Fail(Base):       
+        @classmethod
+        def create(cls, output, editor_log : str):
+            r = cls()
+            r.output = output
+            r.editor_log = editor_log
+            return r
+            
+        def __str__(self):
+            output = (
+                f"Test FAILED\n"
+                f"------------\n"
+                f"|  Output  |\n"
+                f"------------\n"
+                f"{self.get_output_str()}\n"
+                f"--------------\n"
+                f"| Editor log |\n"
+                f"--------------\n"
+                f"{self.get_editor_log_str()}\n"
+            )
+            return output
+
+    class Crash(Base):
+        @classmethod
+        def create(cls, output : str, ret_code : int, stacktrace : str, editor_log : str):
+            r = cls()
+            r.output = output
+            r.ret_code = ret_code
+            r.stacktrace = stacktrace
+            r.editor_log = editor_log
+            return r
+            
+        def __str__(self):
+            stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
+            output = (
+                f"Test CRASHED, return code {hex(self.ret_code)}\n"
+                f"---------------\n"
+                f"|  Stacktrace |\n"
+                f"---------------\n"
+                f"{stacktrace_str}"
+                f"------------\n"
+                f"|  Output  |\n"
+                f"------------\n"
+                f"{self.get_output_str()}\n"
+                f"--------------\n"
+                f"| Editor log |\n"
+                f"--------------\n"
+                f"{self.get_editor_log_str()}\n"
+            )
+            crash_str = "-- No crash information found --"
+            return output
+
+    class Timeout(Base):
+        @classmethod
+        def create(cls, output : str, time_secs : float, editor_log : str):
+            r = cls()
+            r.output = output
+            r.time_secs = time_secs
+            r.editor_log = editor_log
+            return r
+            
+        def __str__(self):
+            output = (
+                f"Test TIMED OUT after {self.time_secs} seconds\n"
+                f"------------\n"
+                f"|  Output  |\n"
+                f"------------\n"
+                f"{self.get_output_str()}\n"
+                f"--------------\n"
+                f"| Editor log |\n"
+                f"--------------\n"
+                f"{self.get_editor_log_str()}\n"
+            )
+            return output
+
+    class Unknown(Base):
+        @classmethod
+        def create(cls, output : str, extra_info : str, editor_log : str):
+            r = cls()
+            r.output = output
+            r.editor_log = editor_log
+            r.extra_info = extra_info
+            return r
+            
+        def __str__(self):
+            output = (
+                f"Unknown test result, possible cause: {self.extra_info}\n"
+                f"------------\n"
+                f"|  Output  |\n"
+                f"------------\n"
+                f"{self.get_output_str()}\n"
+                f"--------------\n"
+                f"| Editor log |\n"
+                f"--------------\n"
+                f"{self.get_editor_log_str()}\n"
+            )
+            return output
+
[email protected]("crash_log_watchdog", [("raise_on_crash", False)])
+class EditorTestSuite():
+    #- Configurable params -#
+    
+    # Extra cmdline arguments to supply for every editor instance for this test suite
+    global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
+    # Tests usually run with no renderer, however some tests require a renderer 
+    use_null_renderer = True
+    # Maximum time for a single editor to stay open on a shared test
+    timeout_editor_shared_test = 300
+
+    # Function to calculate the number of editors to run in parallel, this can be overridden by the user
+    @staticmethod
+    def get_number_parallel_editors():
+        return 8
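+    # For example (sketch only, the suite name is hypothetical), a suite whose tests are resource heavy
+    # could lower this default:
+    #
+    #    class MyHeavyTestSuite(EditorTestSuite):
+    #        @staticmethod
+    #        def get_number_parallel_editors():
+    #            return 2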
+
+    ## Internal ##
+    _TIMEOUT_CRASH_LOG = 20 # Maximum time (seconds) to wait for a crash file
+    _TEST_FAIL_RETCODE = 0xF # Return code for test failure 
+    _asset_processor = None
+    _results = {}
+
+    @pytest.fixture(scope="class")
+    def editor_test_results(self, request):
+        results = {}
+        return results
+
+    class Runner():
+        def __init__(self, name, func, tests):
+            self.name = name
+            self.func = func
+            self.tests = tests
+            self.run_pytestfunc = None
+            self.result_pytestfuncs = []
+
+    # Custom collector class. This collector is where the magic happens, it programmatically adds the test functions
+    # to the class based on the test specifications defined in the TestSuite class.
+    class EditorTestClass(pytest.Class):
+
+        def collect(self):
+            cls = self.obj
+            # This collector does the following:
+            # 1) Iterates through all the EditorSingleTest subclasses defined inside the suite.
+            #    For each of these it adds a test function to the suite that will run the test using its specs
+            # 2) Iterates through all the EditorSharedTest subclasses defined inside the suite.
+            #    The subclasses are then grouped, based on their specs, into 3 categories:
+            #    batched, parallel and batched+parallel.
+            #    Each category gets an associated test runner function that runs all the tests of that category,
+            #    then a result function is added for every test, which will pass/fail based on what happened in the previous
+            #    runner function
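+            # As a rough example (test names are illustrative): for a suite with one EditorSingleTest S,
+            # two EditorBatchedTest B1, B2 and one EditorParallelTest P1, the collection ends up containing
+            # the test functions S, B1, B2 and P1 (the shared ones acting as result placeholders), plus the
+            # runner functions run_batched_tests and run_parallel_tests that actually drive the editors.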
+
+            # Decorator function to add extra lookup information for the test functions
+            def set_marks(marks):
+                def spec_impl(func):
+                    @functools.wraps(func)
+                    def inner(*args, **argv):
+                        return func(*args, **argv)
+                    inner.marks = marks
+                    return inner
+                return spec_impl
+
+            # Retrieve the test specs
+            single_tests = self.obj.get_single_tests()            
+            shared_tests = self.obj.get_shared_tests()
+            batched_tests = cls.filter_shared_tests(shared_tests, is_batchable=True)
+            parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True)
+            parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
+
+            # If the user disabled parallelization and/or batching via cmdline, move the affected tests into the remaining compatible groups
+            no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
+            no_batch = self.config.getoption("--no-editor-batch", default=False)
+            if no_parallelize:
+                single_tests += parallel_tests
+                parallel_tests = []
+                batched_tests += parallel_batched_tests
+                parallel_batched_tests = []
+            if no_batch:
+                single_tests += batched_tests
+                batched_tests = []
+                parallel_tests += parallel_batched_tests
+                parallel_batched_tests = []
+
+            # Add the single tests, these will run normally
+            for test_spec in single_tests:
+                name = test_spec.__name__
+                def make_test_func(name, test_spec):
+                    @set_marks({"run_type" : "run_single"})
+                    def single_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+                        # only single tests are allowed to have setup/teardown, however we can have shared tests that
+                        # were explicitly set as single, for example via cmdline argument override
+                        is_single_test = issubclass(test_spec, EditorSingleTest)
+                        if is_single_test:
+                            # Setup step for wrap_run
+                            wrap = test_spec.wrap_run(self, request, workspace, editor, editor_test_results, launcher_platform)
+                            assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
+                            next(wrap, None)
+                            # Setup step                        
+                            test_spec.setup(self, request, workspace, editor, editor_test_results, launcher_platform)
+                        # Run
+                        self._run_single_test(request, workspace, editor, editor_test_results, test_spec)
+                        if is_single_test:
+                            # Teardown
+                            test_spec.teardown(self, request, workspace, editor, editor_test_results, launcher_platform)
+                            # Teardown step for wrap_run
+                            next(wrap, None)
+                    return single_run
+                setattr(self.obj, name, make_test_func(name, test_spec))
+
+            # Add the shared tests. For these we create a runner class that stores the run information,
+            # which is later used for selecting which test runners will be run
+            runners = []
+
+            def create_runner(name, function, tests):
+                runner = EditorTestSuite.Runner(name, function, tests)
+                def make_func():
+                    @set_marks({"runner" : runner, "run_type" : "run_shared"})
+                    def shared_run(self, request, workspace, editor, editor_test_results, launcher_platform):
+                        getattr(self, function.__name__)(request, workspace, editor, editor_test_results, runner.tests)
+                    return shared_run
+                setattr(self.obj, name, make_func())
+                
+                # Add the shared test results; these just succeed/fail based on what happened in the Runner.
+                for test_spec in tests:
+                    def make_func(test_spec):
+                        @set_marks({"runner" : runner, "test_spec" : test_spec, "run_type" : "result"})
+                        def result(self, request, workspace, editor, editor_test_results, launcher_platform):
+                            # The runner must have filled the editor_test_results dict fixture for this test.
+                            # Hitting this assert likely means there was an error executing the runner
+                            assert test_spec.__name__ in editor_test_results, f"No run data for test: {test_spec.__name__}."
+                            cls._report_result(test_spec.__name__, editor_test_results[test_spec.__name__])
+                        return result
+                    
+                    result_func = make_func(test_spec)
+                    setattr(self.obj, test_spec.__name__, result_func)
+                runners.append(runner)
+            
+            create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
+            create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
+            create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
+
+            # Now that we have added all the functions to the class, we will run
+            # a class test collection to retrieve all the tests.
+            instance = super().collect()[0]
+
+            # Override istestfunction for the object; this makes sure that the
+            # runners are always collected, even if they don't follow the "test_" naming
+            original_istestfunction = instance.istestfunction
+            def istestfunction(self, obj, name):
+                ret = original_istestfunction(obj, name)
+                if not ret:
+                    ret = hasattr(obj, "marks")
+                return ret
+            instance.istestfunction = types.MethodType(istestfunction, instance)
+            collection = instance.collect()
+            def get_func_run_type(f):
+                return getattr(f, "marks", {}).setdefault("run_type", None)
+
+            collected_run_pytestfuncs = [
+                item for item in collection if get_func_run_type(item.obj) == "run_shared"
+            ]
+            collected_result_pytestfuncs = [
+                item for item in collection if get_func_run_type(item.obj) == "result"
+            ]
+            # We'll remove and store the runner functions for later, so they won't
+            # be deselected by any filtering mechanism. The result functions, on the other hand, we do want
+            # to be filtered, since they tell us what the final subset of tests to run is
+            collection = [
+                item for item in collection if item not in (collected_run_pytestfuncs)
+            ]
+                            
+            # Match each generated pytest function with its runner and store them
+            for run_pytestfunc in collected_run_pytestfuncs:
+                runner = run_pytestfunc.function.marks["runner"]
+                runner.run_pytestfunc = run_pytestfunc
+            
+            for result_pytestfunc in collected_result_pytestfuncs:
+                runner = result_pytestfunc.function.marks["runner"]
+                runner.result_pytestfuncs.append(result_pytestfunc)
+
+            self.obj._runners = runners
+            return collection
+
+
+    @staticmethod
+    def pytest_custom_makeitem(collector, name, obj):
+        return EditorTestSuite.EditorTestClass(name, collector)
+
+    @classmethod
+    def pytest_custom_modify_items(cls, session, items, config):
+        # Add the runner functions here and filter the tests that will be run.
+        # A runner is only added if it has any selected tests
+        new_items = []
+        for runner in cls._runners:
+            runner.tests[:] = cls.filter_session_shared_tests(items, runner.tests)
+            if len(runner.tests) > 0:
+                new_items.append(runner.run_pytestfunc)
+                # Re-order dependent tests so they are run just after the runner
+                for result_pytestfunc in runner.result_pytestfuncs:
+                    found_test = next((item for item in items if item == result_pytestfunc), None)
+                    if found_test:
+                        items.remove(found_test)
+                        new_items.append(found_test)
+
+        items[:] = items + new_items
+
+    @classmethod
+    def get_single_tests(cls):
+        single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
+        return single_tests
+        
+    @classmethod
+    def get_shared_tests(cls):
+        shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
+        return shared_tests
+
+    @classmethod
+    def get_session_shared_tests(cls, session):
+        shared_tests = cls.get_shared_tests()
+        return cls.filter_session_shared_tests(session, shared_tests)
+
+    @staticmethod
+    def filter_session_shared_tests(session_items, shared_tests):
+        # Retrieve the subset of tests that was collected;
+        # this can be smaller than the original set if it was overridden via the -k argument or similar
+        collected_elem_names = [test.originalname for test in session_items]
+        selected_shared_tests = [test for test in shared_tests if test.__name__ in collected_elem_names]
+        return selected_shared_tests
+        
+    @staticmethod
+    def filter_shared_tests(shared_tests, is_batchable=False, is_parallelizable=False):
+        # Filter the given shared tests by the requested batchable/parallelizable flags
+        return [
+            t for t in shared_tests if (
+                getattr(t, "is_batchable", None) is is_batchable
+                and
+                getattr(t, "is_parallelizable", None) is is_parallelizable
+            )
+        ]
+
+    def setup_class(cls):
+        cls._asset_processor = None
+    
+    def teardown_class(cls):
+        if cls._asset_processor:
+            cls._asset_processor.stop(1)
+            cls._asset_processor.teardown()
+            cls._asset_processor = None
+            editor_utils.kill_all_ly_processes(include_asset_processor=True)
+        else:
+            editor_utils.kill_all_ly_processes(include_asset_processor=False)
+
+    ### Utils ###
+
+    # Prepares the asset processor for the test
+    def _prepare_asset_processor(self, workspace):
+        try:
+            # Start-up an asset processor if we are not running one
+            # If another AP process exists, don't kill it, as we don't own it
+            if self._asset_processor is None:
+                if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
+                    editor_utils.kill_all_ly_processes()
+                    self._asset_processor = AssetProcessor(workspace)
+                    self._asset_processor.start()
+                else:
+                    editor_utils.kill_all_ly_processes(include_asset_processor=False)
+            else:
+                # Make sure the asset processor from before wasn't closed by accident
+                self._asset_processor.start()
+        except Exception as ex:
+            self._asset_processor = None
+            raise ex
+
+    def _setup_editor_test(self, editor, workspace):
+        self._prepare_asset_processor(workspace)
+        editor_utils.kill_all_ly_processes(include_asset_processor=False)
+        editor.configure_settings()
+
+    # Utility function for parsing the output information from the editor.
+    # It deserializes the JSON content printed in the output for every test and returns that information.
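+    # For reference, each test is expected to emit a single payload of the following shape (values shown
+    # here are illustrative; the actual payload is produced by the in-editor test tooling):
+    #   JSON_START({"name": "my_test_module", "output": "...captured output...", "success": true})JSON_END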
+    @staticmethod
+    def _get_results_using_output(test_spec_list, output, editor_log_content):
+        results = {}
+        pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
+        out_matches = pattern.finditer(output)
+        found_jsons = {}
+        for m in out_matches:
+            try:
+                elem = json.loads(m.groups()[0])
+                found_jsons[elem["name"]] = elem
+            except Exception:
+                continue # Avoid failing if the output data is corrupt
+        
+        # Try to find the element in the log, this is used for cutting the log contents later
+        log_matches = pattern.finditer(editor_log_content)
+        for m in log_matches:
+            try:
+                elem = json.loads(m.groups()[0])
+                if elem["name"] in found_jsons:
+                    found_jsons[elem["name"]]["log_match"] = m
+            except Exception:
+                continue # Avoid failing if the log data is corrupt
+
+        log_start = 0
+        for test_spec in test_spec_list:
+            name = editor_utils.get_module_filename(test_spec.test_module)
+            if name not in found_jsons.keys():
+                results[test_spec.__name__] = Result.Unknown.create(output, "Couldn't find any test run information on stdout", editor_log_content)
+            else:
+                result = None
+                json_result = found_jsons[name]
+                json_output = json_result["output"]
+
+                # Cut the editor log so it only has the output for this run
+                m = json_result["log_match"]
+                end = m.end() if test_spec != test_spec_list[-1] else -1
+                cur_log = editor_log_content[log_start : end]
+                log_start = end
+
+                if json_result["success"]:
+                    result = Result.Pass.create(json_output, cur_log)
+                else:
+                    result = Result.Fail.create(json_output, cur_log)
+                results[test_spec.__name__] = result
+
+        return results
+
+    # Fails the test if the result is not a Pass, reporting the result information
+    @staticmethod
+    def _report_result(name : str, result : Result.Base):
+        if isinstance(result, Result.Pass):
+            output_str = f"Test {name}:\n{str(result)}"
+            print(output_str)
+        else:
+            error_str = f"Test {name}:\n{str(result)}"
+            pytest.fail(error_str)
+
+    ### Running tests ###
+    # Starts the editor with the given test and returns a result dict with a single element specifying the result
+    def _exec_editor_test(self, request, workspace, editor, run_id : int, log_name : str,
+                          test_spec : EditorTestBase, cmdline_args : List[str] = []):
+
+        test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
+        if self.use_null_renderer:
+            test_cmdline_args += ["-rhi=null"]
+            
+        # Cycle any old crash report in case it wasn't cycled properly
+        editor_utils.cycle_crash_report(run_id, workspace)
+
+        test_result = None
+        results = {}
+        output = ""  # Initialized here so the timeout path below can still build a result if the wait fails early
+        test_filename = editor_utils.get_testcase_module_filepath(test_spec.test_module)
+        cmdline = [
+            "--runpythontest", test_filename,
+            "-logfile", f"@log@/{log_name}",
+            "-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
+        editor.args.extend(cmdline)
+        editor.start(backupFiles = False, launch_ap = False, configure_settings=False)
+
+        try:
+            editor.wait(test_spec.timeout)
+            output = editor.get_output()
+            return_code = editor.get_returncode()
+            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+
+            if return_code == 0:
+                test_result = Result.Pass.create(output, editor_log_content)
+            else:
+                has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
+                if has_crashed:
+                    test_result = Result.Crash.create(output, return_code, editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG), None)
+                    editor_utils.cycle_crash_report(run_id, workspace)
+                else:
+                    test_result = Result.Fail.create(output, editor_log_content)
+        except WaitTimeoutError:
+            editor.kill()            
+            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+            test_result = Result.Timeout.create(output, test_spec.timeout, editor_log_content)
+    
+        editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+        results = self._get_results_using_output([test_spec], output, editor_log_content)
+        results[test_spec.__name__] = test_result
+        return results
+
+    # Starts an editor executable with a list of tests and returns a dict with the result of every test run within that editor
+    # instance. In case of failure this function also parses the editor output to find out which specific tests failed
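+    # The tests are joined into one ';'-separated --runpythontest argument for a single editor invocation,
+    # roughly like this (executable name and paths shown only for illustration):
+    #   Editor --runpythontest "tests/test_a.py;tests/test_b.py" -logfile @log@/editor_test.log -project-log-path <log_dir> ...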
+    def _exec_editor_multitest(self, request, workspace, editor, run_id : int, log_name : str,
+                               test_spec_list : List[EditorTestBase], cmdline_args=[]):
+
+        test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
+        if self.use_null_renderer:
+            test_cmdline_args += ["-rhi=null"]
+            
+        # Cycle any old crash report in case it wasn't cycled properly
+        editor_utils.cycle_crash_report(run_id, workspace)
+
+        results = {}
+        test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list)
+        cmdline = [
+            "--runpythontest", test_filenames_str,
+            "-logfile", f"@log@/{log_name}",
+            "-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
+
+        editor.args.extend(cmdline)
+        editor.start(backupFiles = False, launch_ap = False, configure_settings=False)
+
+        output = ""
+        editor_log_content = ""
+        try:
+            editor.wait(self.timeout_editor_shared_test)
+            output = editor.get_output()
+            return_code = editor.get_returncode()
+            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+
+            if return_code == 0:
+                # No need to scrape the output, as all the tests have passed
+                for test_spec in test_spec_list:
+                    results[test_spec.__name__] = Result.Pass.create(output, editor_log_content)
+            else:
+                results = self._get_results_using_output(test_spec_list, output, editor_log_content)
+                has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
+                if has_crashed:
+                    # The first test with an unknown result is assumed to be the one that crashed the editor;
+                    # any following unknown results could not run because of that crash
+                    crashed_test_name = None
+                    for key, result in results.items():
+                        if isinstance(result, Result.Unknown):
+                            if crashed_test_name is None:
+                                crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
+                                editor_utils.cycle_crash_report(run_id, workspace)
+                                results[key] = Result.Crash.create(output, return_code, crash_error, result.editor_log)
+                                crashed_test_name = key
+                            else:
+                                results[key] = Result.Unknown.create(output, f"This test has an unknown result; test '{crashed_test_name}' crashed before this test could be executed", result.editor_log)
+
+        except WaitTimeoutError:
+            results = self._get_results_using_output(test_spec_list, output, editor_log_content)
+            editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
+            editor.kill()
+            for key, result in results.items():
+                if isinstance(result, Result.Unknown):
+                    results[key] = Result.Timeout.create(result.output, self.timeout_editor_shared_test, result.editor_log)
+
+        return results
+    
+    # Runs a single test with the given specs, used by the collector to register the test
+    def _run_single_test(self, request, workspace, editor, editor_test_results, test_spec : EditorTestBase):
+        self._setup_editor_test(editor, workspace)
+        extra_cmdline_args = []
+        if hasattr(test_spec, "extra_cmdline_args"):
+            extra_cmdline_args = test_spec.extra_cmdline_args
+
+        results = self._exec_editor_test(request, workspace, editor, 1, "editor_test.log", test_spec, extra_cmdline_args)
+        if not hasattr(self.__class__, "_results"):
+            self.__class__._results = {}
+
+        editor_test_results.update(results)
+        test_name, test_result = next(iter(results.items()))
+        self._report_result(test_name, test_result)
+
+    # Runs a batch of tests in one single editor with the given spec list
+    def _run_batched_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
+        if not test_spec_list:
+            return
+
+        self._setup_editor_test(editor, workspace)
+        results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list, extra_cmdline_args)
+        assert results is not None
+        editor_test_results.update(results)
+
+    # Runs multiple editors with one test on each editor
+    def _run_parallel_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
+        if not test_spec_list:
+            return
+
+        self._setup_editor_test(editor, workspace)
+        parallel_editors = self._get_number_parallel_editors(request)
+        assert parallel_editors > 0, "Must have at least one editor"
+        
+        # If there are more tests than max parallel editors, we will split them into multiple consecutive runs
+        num_iterations = int(math.ceil(len(test_spec_list) / parallel_editors))
+        for iteration in range(num_iterations):
+            tests_for_iteration = test_spec_list[iteration*parallel_editors:(iteration+1)*parallel_editors]
+            total_threads = len(tests_for_iteration)
+            threads = []
+            results_per_thread = [None] * total_threads
+            for i in range(total_threads):
+                def make_func(test_spec, index, my_editor):
+                    def run(request, workspace, extra_cmdline_args):
+                        results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log", test_spec, extra_cmdline_args)
+                        assert results is not None
+                        results_per_thread[index] = results
+                    return run
+
+                # Duplicate the editor using the one coming from the fixture
+                cur_editor = editor.__class__(workspace, editor.args.copy())
+                f = make_func(tests_for_iteration[i], i, cur_editor)
+                t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
+                t.start()
+                threads.append(t)
+
+            for t in threads:
+                t.join()
+
+            for result in results_per_thread:
+                editor_test_results.update(result)
+
+    # Runs multiple editors with a batch of tests for each editor
+    def _run_parallel_batched_tests(self, request, workspace, editor, editor_test_results, test_spec_list : List[EditorTestBase], extra_cmdline_args=[]):
+        if not test_spec_list:
+            return
+
+        self._setup_editor_test(editor, workspace)
+        total_threads = self._get_number_parallel_editors(request)
+        assert total_threads > 0, "Must have at least one editor"
+        threads = []
+        tests_per_editor = int(math.ceil(len(test_spec_list) / total_threads))
+        results_per_thread = [None] * total_threads
+        for i in range(total_threads):
+            tests_for_thread = test_spec_list[i*tests_per_editor:(i+1)*tests_per_editor]
+            def make_func(test_spec_list_for_editor, index, my_editor):
+                def run(request, workspace, extra_cmdline_args):
+                    results = None
+                    if len(test_spec_list_for_editor) > 0:
+                        results = self._exec_editor_multitest(request, workspace, my_editor, index+1, f"editor_test.log", test_spec_list_for_editor, extra_cmdline_args)
+                        assert results is not None
+                    else:
+                        results = {}
+                    results_per_thread[index] = results
+                return run
+
+            # Duplicate the editor using the one coming from the fixture
+            cur_editor = editor.__class__(workspace, editor.args.copy())
+            f = make_func(tests_for_thread, i, cur_editor)
+            t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
+            t.start()
+            threads.append(t)
+
+        for t in threads:
+            t.join()
+
+        for result in results_per_thread:
+            editor_test_results.update(result)
+
+    # Retrieves the number of parallel editors to use, honoring any cmdline override
+    def _get_number_parallel_editors(self, request):
+        parallel_editors_value = request.config.getoption("parallel_editors", None)
+        if parallel_editors_value:
+            return int(parallel_editors_value)
+
+        return self.get_number_parallel_editors()

+ 144 - 0
Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py

@@ -0,0 +1,144 @@
+"""
+Copyright (c) Contributors to the Open 3D Engine Project.
+For complete copyright and license terms please see the LICENSE at the root of this distribution.
+
+SPDX-License-Identifier: Apache-2.0 OR MIT
+"""
+
+import os
+import time
+import logging
+
+import ly_test_tools.environment.process_utils as process_utils
+import ly_test_tools.environment.waiter as waiter
+
+logger = logging.getLogger(__name__)
+
+def kill_all_ly_processes(include_asset_processor=True):
+    LY_PROCESSES = [
+        'Editor', 'Profiler', 'RemoteConsole',
+    ]
+    AP_PROCESSES = [
+        'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder'
+    ]
+    
+    if include_asset_processor:
+        process_utils.kill_processes_named(LY_PROCESSES+AP_PROCESSES, ignore_extensions=True)
+    else:
+        process_utils.kill_processes_named(LY_PROCESSES, ignore_extensions=True)
+
+def get_testcase_module_filepath(testcase_module):
+    # type: (Module) -> str
+    """
+    Returns the full path of the test module, always using the '.py' extension
+    :param testcase_module: The testcase python module being tested
+    :return str: The full path to the testcase module
+    """
+    return os.path.splitext(testcase_module.__file__)[0] + ".py"
+
+def get_module_filename(testcase_module):
+    # type: (Module) -> str
+    """
+    Returns the filename of the module without the path.
+    Note: This differs from module.__name__ in that it does not include the package directory;
+    for example, /mylibrary/myfile.py will be "myfile" instead of "mylibrary.myfile"
+    :param testcase_module: The testcase python module being tested
+    :return str: Filename of the module
+    """
+    return os.path.splitext(os.path.basename(testcase_module.__file__))[0]
+
+def retrieve_log_path(run_id : int, workspace):
+    """
+    Returns the project log path for this test run.
+    :param run_id: editor id that will be used for differentiating paths
+    :param workspace: Workspace fixture
+    :return str: The full path to the log directory for the given editor run
+    """
+    return os.path.join(workspace.paths.project(), "user", f"log_test_{run_id}")
+
+def retrieve_crash_output(run_id : int, workspace, timeout : float):
+    """
+    Returns the crash output string for the given test run.
+    :param run_id: editor id that will be used for differentiating paths
+    :param workspace: Workspace fixture
+    :param timeout: Maximum time (seconds) to wait for the crash output file to appear
+    :return str: The contents of the editor crash file (error.log)
+    """
+    crash_info = "-- No crash log available --"
+    crash_log = os.path.join(retrieve_log_path(run_id, workspace), 'error.log')
+    try:
+        waiter.wait_for(lambda: os.path.exists(crash_log), timeout=timeout)
+    except AssertionError:                    
+        pass
+        
+    # Even if the path didn't exist, we are interested in the exact reason why it couldn't be read
+    try:
+        with open(crash_log) as f:
+            crash_info = f.read()
+    except Exception as ex:
+        crash_info += f"\n{str(ex)}"
+    return crash_info
+
+def cycle_crash_report(run_id : int, workspace):
+    """
+    Attempts to rename error.log and error.dmp (crash files) to new names that include their modification timestamp.
+    :param run_id: editor id that will be used for differentiating paths
+    :param workspace: Workspace fixture
+    """
+    log_path = retrieve_log_path(run_id, workspace)
+    files_to_cycle = ['error.log', 'error.dmp']
+    for filename in files_to_cycle:
+        filepath = os.path.join(log_path, filename)
+        name, ext = os.path.splitext(filename)
+        if os.path.exists(filepath):
+            try:
+                modTimesinceEpoc = os.path.getmtime(filepath)
+                modStr = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(modTimesinceEpoc))
+                new_filepath = os.path.join(log_path, f'{name}_{modStr}{ext}')
+                os.rename(filepath, new_filepath)
+            except Exception as ex:
+                logger.warning(f"Couldn't cycle file {filepath}. Error: {str(ex)}")
+
+def retrieve_editor_log_content(run_id : int, log_name : str, workspace, timeout=10):
+    """
+    Retrieves the contents of the given editor log file.
+    :param run_id: editor id that will be used for differentiating paths
+    :param log_name: The name of the editor log file to read
+    :param workspace: Workspace fixture
+    :param timeout: Maximum time (seconds) to wait for the log file to appear
+    :return str: The contents of the log
+    """
+    editor_info = "-- No editor log available --"
+    editor_log = os.path.join(retrieve_log_path(run_id, workspace), log_name)
+    try:
+        waiter.wait_for(lambda: os.path.exists(editor_log), timeout=timeout)
+    except AssertionError:              
+        pass
+        
+    # Even if the path didn't exist, we are interested in the exact reason why it couldn't be read
+    try:
+        with open(editor_log) as f:
+            editor_info = ""
+            for line in f:
+                editor_info += f"[editor.log]  {line}"
+    except Exception as ex:
+        editor_info = f"-- Error reading editor.log: {str(ex)} --"
+    return editor_info
+
+def retrieve_last_run_test_index_from_output(test_spec_list, output : str):
+    """
+    Finds out which test was the last one run, by inspecting the editor output.
+    This is used to determine which batched test crashed the editor
+    :param test_spec_list: List of tests that were run in this editor
+    :param output: Editor output to inspect
+    :return: Index in the given test_spec_list of the last test that ran
+    """
+    index = -1
+    find_pos = 0
+    for test_spec in test_spec_list:
+        find_pos = output.find(test_spec.__name__, find_pos)
+        if find_pos == -1:
+            index = max(index, 0) # <- if we didn't even find the first test, assume it was the first one that crashed
+            return index
+        else:
+            index += 1
+    return index

+ 2 - 1
Tools/LyTestTools/setup.py

@@ -47,7 +47,8 @@ if __name__ == '__main__':
             'pytest11': [
                 'ly_test_tools=ly_test_tools._internal.pytest_plugin.test_tools_fixtures',
                 'testrail_filter=ly_test_tools._internal.pytest_plugin.case_id',
-                'terminal_report=ly_test_tools._internal.pytest_plugin.terminal_report'
+                'terminal_report=ly_test_tools._internal.pytest_plugin.terminal_report',
+                'editor_test=ly_test_tools._internal.pytest_plugin.editor_test'
             ],
         },
     )