
Clean up EditorTest and set environment-aware default parallelism (#9987)

* Clean up EditorTest and set environment-aware default parallelism

Signed-off-by: Sweeney <[email protected]>
Kadino committed 3 years ago
commit 1ec4916bfc

+ 12 - 6
Tools/LyTestTools/ly_test_tools/_internal/pytest_plugin/editor_test.py

@@ -4,24 +4,29 @@ For complete copyright and license terms please see the LICENSE at the root of t
 
 SPDX-License-Identifier: Apache-2.0 OR MIT
 
-Utility for specifying an Editor test, supports seamless parallelization and/or batching of tests. This is not a set of
-tools to directly invoke, but a plugin with functions intended to be called by only the Pytest framework.
+PyTest plugin for collecting EditorTests and setting their command-line parameters. Intended to be called only by PyTest.
 """
 from __future__ import annotations
 import pytest
 import inspect
+import ly_test_tools.o3de.editor_test
 
 __test__ = False
 
+
 def pytest_addoption(parser: argparse.ArgumentParser) -> None:
     """
     Options when running editor tests in batches or parallel.
     :param parser: The ArgumentParser object
     :return: None
     """
-    parser.addoption("--no-editor-batch", action="store_true", help="Don't batch multiple tests in single editor")
-    parser.addoption("--no-editor-parallel", action="store_true", help="Don't run multiple editors in parallel")
-    parser.addoption("--editors-parallel", type=int, action="store", help="Override the number editors to run at the same time")
+    parser.addoption("--no-editor-batch", action="store_true", help="Disable batching multiple tests within each single editor")
+    parser.addoption("--no-editor-parallel", action="store_true", help="Disable running multiple editors in parallel")
+    parser.addoption("--editors-parallel", type=int, action="store",
+                     help="Override the number of editors to run at the same time. Test suites may also override "
+                          "this value via EditorTestSuite.get_number_parallel_editors(), though this option takes "
+                          "precedence when provided. Default value is: "
+                          f"{ly_test_tools.o3de.editor_test.EditorTestSuite.get_number_parallel_editors()}")
+
 
 def pytest_pycollect_makeitem(collector: PyCollector, name: str, obj: object) -> PyCollector:
     """
@@ -37,8 +42,9 @@ def pytest_pycollect_makeitem(collector: PyCollector, name: str, obj: object) ->
             if hasattr(base, "pytest_custom_makeitem"):
                 return base.pytest_custom_makeitem(collector, name, obj)
 
+
 @pytest.hookimpl(hookwrapper=True)
-def pytest_collection_modifyitems(session: Session, items: list[EditorTestBase], config: Config) -> None:
+def pytest_collection_modifyitems(session: Session, items: list[EditorTest], config: Config) -> None:
     """
     Add custom modification of items. This is used for adding the runners into the item list.
     :param session: The Pytest Session
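
To sanity-check the environment-aware default advertised in the help text above, the value can be printed directly. A minimal sketch, assuming LyTestTools is importable in the current Python environment:

import ly_test_tools.o3de.editor_test as editor_test

# Half of the reported logical cores, floored, never less than 1 (e.g. 4 on an 8-core machine).
print(editor_test.EditorTestSuite.get_number_parallel_editors())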

+ 197 - 141
Tools/LyTestTools/ly_test_tools/o3de/editor_test.py

@@ -4,11 +4,11 @@ For complete copyright and license terms please see the LICENSE at the root of t
 
 SPDX-License-Identifier: Apache-2.0 OR MIT
 
-Simplified O3DE Editor test-writing utilities.
+Test-writing utilities that simplify creating O3DE in-Editor tests in Python.
 
-Test writers should subclass a test suite from EditorTestSuite for easy specifcation of python test scripts for
-the editor to run. Tests can be parallelized (run in multiple editor instances at once) and/or batched (multiple tests
-run in the same editor instance), with collated results and crash detection.
+Test writers should subclass a test suite from EditorTestSuite to hold the specification of python test scripts for
+the editor to load and run. Tests can be parallelized (run in multiple editor instances at once) and/or batched
+(multiple tests run in the same editor instance), with collated results and crash detection.
 
 Usage example:
    class MyTestSuite(EditorTestSuite):
@@ -24,13 +24,6 @@ Usage example:
 """
 from __future__ import annotations
 
-import tempfile
-
-import pytest
-import _pytest.python
-import _pytest.outcomes
-from _pytest.skipping import pytest_runtest_setup as skipping_pytest_runtest_setup
-
 import abc
 import functools
 import inspect
@@ -38,42 +31,46 @@ import json
 import logging
 import math
 import os
+import pytest
 import re
+import tempfile
 import threading
 import types
 import warnings
 
+import _pytest.python
+import _pytest.outcomes
+from _pytest.skipping import pytest_runtest_setup as skip_pytest_runtest_setup
+
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.o3de.editor_test_utils as editor_utils
-import ly_test_tools._internal.pytest_plugin.test_tools_fixtures
-
 from ly_test_tools.o3de.asset_processor import AssetProcessor
 from ly_test_tools.launchers.exceptions import WaitTimeoutError
 
-# This file contains ready-to-use test functions which are not actual tests, avoid pytest collection
-__test__ = False
+__test__ = False  # This file contains ready-to-use test functions which are not actual tests, avoid pytest collection
 
 logger = logging.getLogger(__name__)
 
 
-class EditorTestBase(abc.ABC):
+class EditorTest(abc.ABC):
     """
-    Abstract Editor Test
+    Abstract Test targeting the O3DE Editor. The following attributes can be overridden by the test writer:
     """
-    # Maximum time for run, in seconds
-    timeout = 180
-    # Test file that this test will run
+    # Test file that this test will run, must be set by user for EditorTest to execute
     test_module = None
-    # Attach debugger when running the test, useful for debugging crashes. This should never be True on production.
-    # It's also recommended to switch to EditorSingleTest for debugging in isolation
+    # Maximum time to allow the editor to run, in seconds
+    timeout = 180
+    # Attach debugger when running the test, useful for debugging crashes. Should never be left True in production.
+    # Where possible, also recommended to switch to EditorSingleTest for better debugging in isolation.
     attach_debugger = False
     # Wait until a debugger is attached at the startup of the test, this is another way of debugging.
     wait_for_debugger = False
 
 
-class EditorSingleTest(EditorTestBase):
+class EditorSingleTest(EditorTest):
     """
-    Test that will be run alone in one editor, with no parallel editors
+    Test that will run alone in one editor with no parallel editors, limiting environmental side-effects at the
+    expense of redundant isolated work
     """
     def __init__(self):
         # Extra cmdline arguments to supply to the editor for the test
@@ -82,31 +79,59 @@ class EditorSingleTest(EditorTestBase):
         self.use_null_renderer = None
 
     @staticmethod
-    def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
+    def setup(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
+              workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+              editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
+              launcher_platform: str) -> None:
         """
-        User-overrideable setup function, which will run before the test
+        User-overrideable setup function, which will run before the test.
+        :param instance: Parent EditorTestClass instance executing the test
+        :param request: PyTest request object
+        :param workspace: LyTestTools workspace manager
+        :param editor: LyTestTools editor-launcher object
+        :param editor_test_results: Currently recorded EditorTest results
+        :param launcher_platform: user-parameterized string for LyTestTools
         """
         pass
 
     @staticmethod
-    def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
+    def wrap_run(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
+                 workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+                 editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
+                 launcher_platform: str) -> None:
         """
-        User-overrideable wrapper function, which will run before and after test.
+        User-overrideable wrapper function, which will run both before and after the test.
         Any code before the 'yield' statement will run before the test. With code after yield run after the test.
+        Setup will run before wrap_run starts. Teardown will run after it completes.
+        :param instance: Parent EditorTestClass instance executing the test
+        :param request: PyTest request object
+        :param workspace: LyTestTools workspace manager
+        :param editor: LyTestTools editor-launcher object
+        :param editor_test_results: Currently recorded EditorTest results
+        :param launcher_platform: user-parameterized string for LyTestTools
         """
         yield
 
     @staticmethod
-    def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
+    def teardown(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
+                 workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+                 editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
+                 launcher_platform: str) -> None:
         """
         User-overrideable teardown function, which will run after the test
+        :param instance: Parent EditorTestClass instance executing the test
+        :param request: PyTest request object
+        :param workspace: LyTestTools workspace manager
+        :param editor: LyTestTools editor-launcher object
+        :param editor_test_results: Currently recorded EditorTest results
+        :param launcher_platform: user-parameterized string for LyTestTools
         """
         pass
 
 
-class EditorSharedTest(EditorTestBase):
+class EditorSharedTest(EditorTest):
     """
-    Test that will be run in parallel with tests in different editor instances, as well as serially batched with other
+    Test that will run in parallel with tests in different editor instances, as well as serially batched with other
     tests in each editor instance. Minimizes total test run duration.
 
     Does not support per test setup/teardown to avoid creating race conditions
@@ -119,7 +144,7 @@ class EditorSharedTest(EditorTestBase):
 
 class EditorParallelTest(EditorSharedTest):
     """
-    Test that will be run in parallel with tests in different editor instances, though not serially batched with other
+    Test that will run in parallel with tests in different editor instances, though not serially batched with other
     tests in each editor instance. Reduces total test run duration, while limiting side-effects between tests.
 
     Does not support per test setup/teardown to avoid creating race conditions
@@ -130,20 +155,29 @@ class EditorParallelTest(EditorSharedTest):
 
 class EditorBatchedTest(EditorSharedTest):
     """
-    Test that will be batched along with the other batched tests in the same editor instance, though not executed in
-    parallel with other editor instances. Reduces repeated overhead from starting the Editor.
+    Test that will run serially batched with other tests in the same editor instance, though not executed in parallel with
+    other editor instances. Reduces overhead from starting the Editor, while limiting side-effects between editors.
 
     Does not support per test setup/teardown to avoid creating race conditions
     """
     is_batchable = True
     is_parallelizable = False
 
+
 class Result:
 
     class EditorTestResultException(Exception):
         """ Indicates that an unknown result was found during the tests  """
 
-    class Base:
+    class ResultType(abc.ABC):
+        """
+        Generic result-type for data shared among results
+        """
+        @abc.abstractmethod
+        def __str__(self):
+            # type () -> str
+            return ""
+
         def get_output_str(self):
             # type () -> str
             """
@@ -155,7 +189,7 @@ class Result:
                 return output
             else:
                 return "-- No output --"
-            
+
         def get_editor_log_str(self):
             # type () -> str
             """
@@ -168,12 +202,12 @@ class Result:
             else:
                 return "-- No editor log found --"
 
-    class Pass(Base):
+    class Pass(ResultType):
 
-        def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
+        def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
             """
             Represents a test success
-            :test_spec: The type of EditorTestBase
+            :test_spec: The type of EditorTest
             :output: The test output
             :editor_log: The editor log's output
             """
@@ -191,12 +225,12 @@ class Result:
             )
             return output
 
-    class Fail(Base):
+    class Fail(ResultType):
 
-        def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
+        def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
             """
             Represents a normal test failure
-            :test_spec: The type of EditorTestBase
+            :test_spec: The type of EditorTest
             :output: The test output
             :editor_log: The editor log's output
             """
@@ -218,13 +252,13 @@ class Result:
             )
             return output
 
-    class Crash(Base):
+    class Crash(ResultType):
 
-        def __init__(self, test_spec: type(EditorTestBase), output: str, ret_code: int, stacktrace: str,
+        def __init__(self, test_spec: type(EditorTest), output: str, ret_code: int, stacktrace: str,
                      editor_log: str):
             """
             Represents a test which failed with an unexpected crash
-            :test_spec: The type of EditorTestBase
+            :test_spec: The type of EditorTest
             :output: The test output
             :ret_code: The test's return code
             :stacktrace: The test's stacktrace if available
@@ -255,12 +289,12 @@ class Result:
             )
             return output
 
-    class Timeout(Base):
+    class Timeout(ResultType):
 
-        def __init__(self, test_spec: type(EditorTestBase), output: str, time_secs: float, editor_log: str):
+        def __init__(self, test_spec: type(EditorTest), output: str, time_secs: float, editor_log: str):
             """
             Represents a test which failed due to freezing, hanging, or executing slowly
-            :test_spec: The type of EditorTestBase
+            :test_spec: The type of EditorTest
             :output: The test output
             :time_secs: The timeout duration in seconds
             :editor_log: The editor log's output
@@ -285,13 +319,13 @@ class Result:
             )
             return output
 
-    class Unknown(Base):
+    class Unknown(ResultType):
 
-        def __init__(self, test_spec: type(EditorTestBase), output: str = None, extra_info: str = None,
+        def __init__(self, test_spec: type(EditorTest), output: str = None, extra_info: str = None,
                      editor_log: str = None):
             """
             Represents a failure that the test framework cannot classify
-            :test_spec: The type of EditorTestBase
+            :test_spec: The type of EditorTest
             :output: The test output
             :extra_info: Any extra information as a string
             :editor_log: The editor log's output
@@ -320,30 +354,43 @@ class Result:
 class EditorTestSuite:
     # Extra cmdline arguments to supply for every editor instance for this test suite
     global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
-    # Tests usually run with no renderer, however some tests require a renderer 
+    # Tests usually run with no renderer, however some tests require a renderer and will disable this
     use_null_renderer = True
-    # Maximum time for a single editor to stay open on a shared test
+    # Maximum time in seconds for a single editor to stay open across the set of shared tests
     timeout_editor_shared_test = 300
 
-    # Function to calculate number of editors to run in parallel, this can be overridden by the user
     @staticmethod
-    def get_number_parallel_editors():
-        return 8
-
-    _TIMEOUT_CRASH_LOG = 20  # Maximum time (seconds) for waiting for a crash file, in seconds
+    def get_number_parallel_editors() -> int:
+        """
+        Number of editors to run in parallel; this method can be overridden by the user.
+        Note: CLI option '--editors-parallel' takes precedence over class settings.
+        :return: count of parallel editors to run
+        """
+        count = 1
+        found_processors = os.cpu_count()
+        if found_processors:
+            # only schedule on half the cores since the application will also run multithreaded
+            # also compensates for hyperthreaded/clustered/virtual cores inflating this count
+            count = math.floor(found_processors / 2)
+            if count < 1:
+                count = 1
+
+        return count
+
+    _TIMEOUT_CRASH_LOG = 20  # Maximum time (seconds) for waiting for a crash file to finish being dumped to disk
     _TEST_FAIL_RETCODE = 0xF  # Return code for test failure
 
     class TestData:
-        __test__ = False  # Required to tell PyTest to skip collecting this class even though it has "Test" in the name; avoids PyTest warnings.
+        __test__ = False  # Tell PyTest to skip collecting this even though it has "Test" in the name; avoids warnings.
 
         def __init__(self):
-            self.results = {}  # Dict of str(test_spec.__name__) -> Result
+            self.results = {}  # Dict of str(test_spec.__name__) -> Result.ResultType
             self.asset_processor = None
 
     @pytest.fixture(scope="class")
     def editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
         """
-        Yields a per-testsuite structure to store the data of each test result and an AssetProcessor object that will be
+        Yields a per-class structure to store the data of each test result and an AssetProcessor object that will be
         re-used on the whole suite
         :request: The Pytest request object
         :yield: The TestData object
@@ -352,16 +399,19 @@ class EditorTestSuite:
 
     def _editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
         """
-        A wrapper function for unit testing of this file to call directly. Do not use in production.
+        A wrapped implementation to simplify unit testing pytest fixtures. Users should not call this directly.
+        :request: The Pytest request object (unused, but always passed by pytest)
+        :yield: The TestData object
         """
         test_data = EditorTestSuite.TestData()
-        yield test_data
-        if test_data.asset_processor:
+        yield test_data  # yield to pytest while test-class executes
+        # resumed by pytest after each test-class finishes
+        if test_data.asset_processor:  # was assigned an AP to manage
             test_data.asset_processor.stop(1)
             test_data.asset_processor.teardown()
             test_data.asset_processor = None
             editor_utils.kill_all_ly_processes(include_asset_processor=True)
-        else:
+        else:  # do not interfere as a preexisting AssetProcessor may be owned by something else
             editor_utils.kill_all_ly_processes(include_asset_processor=False)
 
     class Runner:
@@ -401,13 +451,13 @@ class EditorTestSuite:
                 return spec_impl
 
             # Retrieve the test specs
-            single_tests = self.obj.get_single_tests()            
+            single_tests = self.obj.get_single_tests()
             shared_tests = self.obj.get_shared_tests()
-            batched_tests = cls.filter_shared_tests(shared_tests, is_batchable=True)
-            parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True)
+            batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=False, is_batchable=True)
+            parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=False)
             parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
 
-            # If user provides option to not parallelize/batch the tests, move them into single tests
+            # user can provide CLI option to not parallelize/batch the tests
             no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
             no_batch = self.config.getoption("--no-editor-batch", default=False)
             if no_parallelize:
@@ -421,82 +471,84 @@ class EditorTestSuite:
                 parallel_tests += parallel_batched_tests
                 parallel_batched_tests = []
 
-            # Add the single tests, these will run normally
+            # Add the single tests, these will run separately
             for test_spec in single_tests:
                 name = test_spec.__name__
 
-                def make_test_func(name, test_spec):
+                def make_test_func(inner_test_spec):
                     @set_marks({"run_type": "run_single"})
                     def single_run(self, request, workspace, editor, editor_test_data, launcher_platform):
                         # only single tests are allowed to have setup/teardown, however we can have shared tests that
                         # were explicitly set as single, for example via cmdline argument override
-                        is_single_test = issubclass(test_spec, EditorSingleTest)
+                        is_single_test = issubclass(inner_test_spec, EditorSingleTest)
                         if is_single_test:
                             # Setup step for wrap_run
-                            wrap = test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
+                            wrap = inner_test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
                             assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
                             next(wrap, None)
-                            # Setup step                        
-                            test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
+                            # Setup step
+                            inner_test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
                         # Run
-                        self._run_single_test(request, workspace, editor, editor_test_data, test_spec)
+                        self._run_single_test(request, workspace, editor, editor_test_data, inner_test_spec)
                         if is_single_test:
                             # Teardown
-                            test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
+                            inner_test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
                             # Teardown step for wrap_run
                             next(wrap, None)
                     return single_run
-                f = make_test_func(name, test_spec)
+                f = make_test_func(test_spec)
                 if hasattr(test_spec, "pytestmark"):
                     f.pytestmark = test_spec.pytestmark
                 setattr(self.obj, name, f)
 
-            # Add the shared tests, for these we will create a runner class for storing the run information
-            # that will be later used for selecting what tests runners will be run
+            # Add the shared tests, with a runner class for storing information from each shared run
             runners = []
 
-            def create_runner(name, function, tests):
-                runner = EditorTestSuite.Runner(name, function, tests)
+            def create_runner(runner_name, function, tests):
+                target_runner = EditorTestSuite.Runner(runner_name, function, tests)
 
                 def make_func():
-                    @set_marks({"runner": runner, "run_type": "run_shared"})
+                    @set_marks({"runner": target_runner, "run_type": "run_shared"})
                     def shared_run(self, request, workspace, editor, editor_test_data, launcher_platform):
-                        getattr(self, function.__name__)(request, workspace, editor, editor_test_data, runner.tests)
+                        getattr(self, function.__name__)(request, workspace, editor, editor_test_data, target_runner.tests)
+
                     return shared_run
-                setattr(self.obj, name, make_func())
-                
-                # Add the shared tests results, these just succeed/fail based what happened on the Runner.
-                for test_spec in tests:
-                    def make_func(test_spec):
-                        @set_marks({"runner": runner, "test_spec": test_spec, "run_type": "result"})
+
+                setattr(self.obj, runner_name, make_func())
+
+                # Add the shared test results, which succeed/fail based on what happened in the Runner.
+                for shared_test_spec in tests:
+                    def make_func(inner_test_spec):
+                        @set_marks({"runner": target_runner, "test_spec": inner_test_spec, "run_type": "result"})
                         def result(self, request, workspace, editor, editor_test_data, launcher_platform):
+                            result_key = inner_test_spec.__name__
                             # The runner must have filled the editor_test_data.results dict fixture for this test.
                             # Hitting this assert could mean if there was an error executing the runner
-                            if test_spec.__name__ not in editor_test_data.results:
-                                raise Result.EditorTestResultException(f"No results found for {test_spec.__name__}. "
+                            if result_key not in editor_test_data.results:
+                                raise Result.EditorTestResultException(f"No results found for {result_key}. "
                                                                        f"Test may not have ran due to the Editor "
                                                                        f"shutting down. Check for issues in previous "
                                                                        f"tests.")
-                            cls._report_result(test_spec.__name__, editor_test_data.results[test_spec.__name__])
+                            cls._report_result(result_key, editor_test_data.results[result_key])
+
                         return result
-                    
-                    result_func = make_func(test_spec)
-                    if hasattr(test_spec, "pytestmark"):
-                        result_func.pytestmark = test_spec.pytestmark
-                    setattr(self.obj, test_spec.__name__, result_func)
-                runners.append(runner)
-            
+
+                    result_func = make_func(shared_test_spec)
+                    if hasattr(shared_test_spec, "pytestmark"):
+                        result_func.pytestmark = shared_test_spec.pytestmark
+                    setattr(self.obj, shared_test_spec.__name__, result_func)
+                runners.append(target_runner)
+
             create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
             create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
             create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
 
-            # Now that we have added all the functions to the class, we will run
-            # a class test collection to retrieve all the tests.
-            instance = super().collect()[0]
+            # Now that we have added the functions to the class, have pytest retrieve all the tests the class contains
+            pytest_class_instance = super().collect()[0]
 
             # Override the istestfunction for the object, with this we make sure that the
             # runners are always collected, even if they don't follow the "test_" naming
-            original_istestfunction = instance.istestfunction
+            original_istestfunction = pytest_class_instance.istestfunction
 
             def istestfunction(self, obj, name):
                 ret = original_istestfunction(obj, name)
@@ -504,11 +556,11 @@ class EditorTestSuite:
                     ret = hasattr(obj, "marks")
                 return ret
 
-            instance.istestfunction = types.MethodType(istestfunction, instance)
-            collection = instance.collect()
+            pytest_class_instance.istestfunction = types.MethodType(istestfunction, pytest_class_instance)
+            collection = pytest_class_instance.collect()
 
-            def get_func_run_type(f):
-                return getattr(f, "marks", {}).setdefault("run_type", None)
+            def get_func_run_type(function):
+                return getattr(function, "marks", {}).setdefault("run_type", None)
 
             collected_run_pytestfuncs = [
                 item for item in collection if get_func_run_type(item.obj) == "run_shared"
@@ -516,18 +568,18 @@ class EditorTestSuite:
             collected_result_pytestfuncs = [
                 item for item in collection if get_func_run_type(item.obj) == "result"
             ]
-            # We'll remove and store the runner functions for later, this way they won't 
+            # We'll remove and store the runner functions for later, this way they won't
             # be deselected by any filtering mechanism. The result functions for these we are actually
             # interested on them to be filtered to tell what is the final subset of tests to run
             collection = [
                 item for item in collection if item not in collected_run_pytestfuncs
             ]
-                            
-            # Match each generated pytestfunctions with every runner and store them 
+
+            # Match each generated pytestfunctions with every runner and store them
             for run_pytestfunc in collected_run_pytestfuncs:
                 runner = run_pytestfunc.function.marks["runner"]
                 runner.run_pytestfunc = run_pytestfunc
-            
+
             for result_pytestfunc in collected_result_pytestfuncs:
                 runner = result_pytestfunc.function.marks["runner"]
                 runner.result_pytestfuncs.append(result_pytestfunc)
@@ -540,7 +592,7 @@ class EditorTestSuite:
         return EditorTestSuite.EditorTestClass(name, collector)
 
     @classmethod
-    def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTestBase],
+    def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTest],
                                    config: _pytest.config.Config) -> None:
         """
         Adds the runners' functions and filters the tests that will run. The runners will be added if they have any
@@ -574,7 +626,8 @@ class EditorTestSuite:
                    from . import script_to_be_run_by_editor as test_module
         :return: The list of single tests
         """
-        single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
+        single_tests = [c[1] for c in cls.__dict__.items()
+                        if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
         return single_tests
         
     @classmethod
@@ -587,11 +640,12 @@ class EditorTestSuite:
                    from . import script_to_be_run_by_editor as test_module
         :return: The list of shared tests
         """
-        shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
+        shared_tests = [c[1] for c in cls.__dict__.items()
+                        if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
         return shared_tests
 
     @classmethod
-    def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTestBase]:
+    def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTest]:
         """
         Filters and returns all of the shared tests in a given session.
         :session: The test session
@@ -601,17 +655,18 @@ class EditorTestSuite:
         return cls.filter_session_shared_tests(session, shared_tests)
 
     @staticmethod
-    def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTestBase)], shared_tests: list[EditorSharedTest]) -> list[EditorTestBase]:
+    def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTest)],
+                                    shared_tests: list[EditorSharedTest]) -> list[EditorTest]:
         """
-        Retrieve the test sub-set that was collected this can be less than the original set if were overriden via -k
-        argument or similars
+        Retrieve the subset of tests that was collected.
+        Note that this can be fewer than the original set if filtered, such as via the pytest -k argument.
         :session_items: The tests in a session to run
         :shared_tests: All of the shared tests
         :return: The list of filtered tests
         """
         def will_run(item):
             try:
-                skipping_pytest_runtest_setup(item)
+                skip_pytest_runtest_setup(item)
                 return True
             except (Warning, Exception, _pytest.outcomes.OutcomeException) as ex:
                 # intentionally broad to avoid events other than system interrupts
@@ -627,8 +682,8 @@ class EditorTestSuite:
     def filter_shared_tests(shared_tests: list[EditorSharedTest], is_batchable: bool = False,
                             is_parallelizable: bool = False) -> list[EditorSharedTest]:
         """
-        Filters and returns all tests based off of if they are batchable and/or parallelizable
-        :shared_tests: All shared tests
+        Filters the provided list of tests on whether they are batchable and/or parallelizable
+        :shared_tests: List of shared tests
         :is_batchable: Filter to batchable tests
         :is_parallelizable: Filter to parallelizable tests
         :return: The list of filtered tests
@@ -641,7 +696,8 @@ class EditorTestSuite:
             )
         ]
 
-    def _prepare_asset_processor(self, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+    @staticmethod
+    def _prepare_asset_processor(workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
                                  editor_test_data: TestData) -> None:
         """
         Prepares the asset processor for the test depending on whether or not the process is open and if the current
@@ -651,17 +707,15 @@ class EditorTestSuite:
         :return: None
         """
         try:
-            # Start-up an asset processor if we are not running one
-            # If another AP process exist, don't kill it, as we don't own it
+            # Start-up an asset processor if we are not already managing one
             if editor_test_data.asset_processor is None:
                 if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
                     editor_utils.kill_all_ly_processes(include_asset_processor=True)
                     editor_test_data.asset_processor = AssetProcessor(workspace)
                     editor_test_data.asset_processor.start()
-                else:
+                else:  # If another AP process already exists, do not kill it as we do not manage it
                     editor_utils.kill_all_ly_processes(include_asset_processor=False)
-            else:
-                # Make sure the asset processor from before wasn't closed by accident
+            else:  # Make sure existing asset processor wasn't closed by accident
                 editor_test_data.asset_processor.start()
         except Exception as ex:
             editor_test_data.asset_processor = None
@@ -682,7 +736,7 @@ class EditorTestSuite:
         editor.configure_settings()
 
     @staticmethod
-    def _get_results_using_output(test_spec_list: list[EditorTestBase], output: str, editor_log_content: str) -> dict[str, Result]:
+    def _get_results_using_output(test_spec_list: list[EditorTest], output: str, editor_log_content: str) -> dict[str, Result.ResultType]:
         """
         Utility function for parsing the output information from the editor. It deserializes the JSON content printed in
         the output for every test and returns that information.
@@ -700,6 +754,7 @@ class EditorTestSuite:
                 elem = json.loads(m.groups()[0])
                 found_jsons[elem["name"]] = elem
             except Exception:  # Intentionally broad to avoid failing if the output data is corrupt
+                logging.warning("Error reading result JSON", exc_info=True)
                 continue
         
         # Try to find the element in the log, this is used for cutting the log contents later
@@ -710,6 +765,7 @@ class EditorTestSuite:
                 if elem["name"] in found_jsons:
                     found_jsons[elem["name"]]["log_match"] = m
             except Exception:  # Intentionally broad, to avoid failing if the log data is corrupt
+                logging.warning("Error reading result JSON", exc_info=True)
                 continue
 
         log_start = 0
@@ -743,9 +799,9 @@ class EditorTestSuite:
         return results
 
     @staticmethod
-    def _report_result(name: str, result: Result) -> None:
+    def _report_result(name: str, result: Result.ResultType) -> None:
         """
-        Fails the test if the test result is not a PASS, specifying the information
+        Raises a pytest failure if the test result is not a PASS, reporting the result information
         :name: Name of the test
         :result: The Result object which denotes if the test passed or not
         :return: None
@@ -760,18 +816,18 @@ class EditorTestSuite:
     def _exec_editor_test(self, request: _pytest.fixtures.FixtureRequest,
                           workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
                           editor: ly_test_tools.launchers.platforms.base.Launcher,
-                          run_id: int, log_name: str, test_spec: EditorTestBase,
-                          cmdline_args: list[str] = None) -> dict[str, Result]:
+                          run_id: int, log_name: str, test_spec: EditorTest,
+                          cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
         """
-        Starts the editor with the given test and retuns an result dict with a single element specifying the result
+        Starts the editor with the given test and returns a result dict with a single element specifying the result
         :request: The pytest request
         :workspace: The LyTestTools Workspace object
         :editor: The LyTestTools Editor object
         :run_id: The unique run id
         :log_name: The name of the editor log to retrieve
-        :test_spec: The type of EditorTestBase
+        :test_spec: The type of EditorTest
         :cmdline_args: Any additional command line args
-        :return: a dictionary of Result objects
+        :return: a dictionary of Result objects (should be only one)
         """
         if cmdline_args is None:
             cmdline_args = []
@@ -845,8 +901,8 @@ class EditorTestSuite:
     def _exec_editor_multitest(self, request: _pytest.fixtures.FixtureRequest,
                                workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
                                editor: ly_test_tools.launchers.platforms.base.Launcher, run_id: int, log_name: str,
-                               test_spec_list: list[EditorTestBase],
-                               cmdline_args: list[str] = None) -> dict[str, Result]:
+                               test_spec_list: list[EditorTest],
+                               cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
         """
         Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that
         editor instance. In case of failure this function also parses the editor output to find out what specific tests
@@ -856,7 +912,7 @@ class EditorTestSuite:
         :editor: The LyTestTools Editor object
         :run_id: The unique run id
         :log_name: The name of the editor log to retrieve
-        :test_spec_list: A list of EditorTestBase tests to run in the same editor instance
+        :test_spec_list: A list of EditorTest tests to run in the same editor instance
         :cmdline_args: Any additional command line args
         :return: A dict of Result objects
         """
@@ -1194,8 +1250,8 @@ class EditorTestSuite:
 
     def _get_number_parallel_editors(self, request: _pytest.fixtures.FixtureRequest) -> int:
         """
-        Retrieves the number of parallel preference cmdline overrides
-        :request: The Pytest Request
+        Retrieves the preferred number of parallel editors, based on cmdline overrides, class overrides, or the default
+        :request: The Pytest Request object
         :return: The number of parallel editors to use
         """
         parallel_editors_value = request.config.getoption("--editors-parallel", None)
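
For reference, a hypothetical suite exercising the reworked API; the suite, test, and script names are illustrative only, and per the note in get_number_parallel_editors() the --editors-parallel CLI option still takes precedence when supplied:

from ly_test_tools.o3de.editor_test import EditorTestSuite, EditorBatchedTest


class MyTestSuite(EditorTestSuite):
    # Cap this suite at two parallel editors regardless of detected core count.
    @staticmethod
    def get_number_parallel_editors() -> int:
        return 2

    class MyBatchedTest(EditorBatchedTest):
        from . import my_editor_test_script as test_module  # hypothetical editor-side script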

+ 22 - 19
Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py

@@ -7,10 +7,10 @@ SPDX-License-Identifier: Apache-2.0 OR MIT
 Utility functions mostly for the editor_test module. They can also be used for assisting Editor tests.
 """
 from __future__ import annotations
-import os
-import time
 import logging
+import os
 import re
+import time
 
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.waiter as waiter
@@ -25,20 +25,20 @@ def kill_all_ly_processes(include_asset_processor: bool = True) -> None:
     :param include_asset_processor: Boolean flag whether or not to kill the AP
     :return: None
     """
-    LY_PROCESSES = [
+    ly_processes = [
         'Editor', 'Profiler', 'RemoteConsole', 'o3de', 'AutomatedTesting.ServerLauncher'
     ]
-    AP_PROCESSES = [
+    ap_processes = [
         'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder'
     ]
     
     if include_asset_processor:
-        process_utils.kill_processes_named(LY_PROCESSES+AP_PROCESSES, ignore_extensions=True)
+        process_utils.kill_processes_named(ly_processes+ap_processes, ignore_extensions=True)
     else:
-        process_utils.kill_processes_named(LY_PROCESSES, ignore_extensions=True)
+        process_utils.kill_processes_named(ly_processes, ignore_extensions=True)
 
 
-def get_testcase_module_filepath(testcase_module: Module) -> str:
+def get_testcase_module_filepath(testcase_module: types.ModuleType) -> str:
     """
     return the full path of the test module using always '.py' extension
     :param testcase_module: The testcase python module being tested
@@ -47,7 +47,7 @@ def get_testcase_module_filepath(testcase_module: Module) -> str:
     return os.path.splitext(testcase_module.__file__)[0] + ".py"
 
 
-def get_module_filename(testcase_module: Module):
+def get_module_filename(testcase_module: types.ModuleType) -> str:
     """
     return The filename of the module without path
     Note: This is differs from module.__name__ in the essence of not having the package directory.
@@ -58,7 +58,7 @@ def get_module_filename(testcase_module: Module):
     return os.path.splitext(os.path.basename(testcase_module.__file__))[0]
 
 
-def retrieve_log_path(run_id: int, workspace: AbstractWorkspaceManager) -> str:
+def retrieve_log_path(run_id: int, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> str:
     """
     return the log/ project path for this test run.
     :param run_id: editor id that will be used for differentiating paths
@@ -68,12 +68,13 @@ def retrieve_log_path(run_id: int, workspace: AbstractWorkspaceManager) -> str:
     return os.path.join(workspace.paths.project(), "user", f"log_test_{run_id}")
 
 
-def retrieve_crash_output(run_id: int, workspace: AbstractWorkspaceManager, timeout: float = 10) -> str:
+def retrieve_crash_output(run_id: int, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+                          timeout: float = 10) -> str:
     """
     returns the crash output string for the given test run.
     :param run_id: editor id that will be used for differentiating paths
     :param workspace: Workspace fixture
-    :timeout: Maximum time (seconds) to wait for crash output file to appear
+    :param timeout: Maximum time (seconds) to wait for crash output file to appear
     :return str: The contents of the editor crash file (error.log)
     """
     crash_info = "-- No crash log available --"
@@ -94,7 +95,7 @@ def retrieve_crash_output(run_id: int, workspace: AbstractWorkspaceManager, time
     return crash_info
 
 
-def cycle_crash_report(run_id: int, workspace: AbstractWorkspaceManager) -> None:
+def cycle_crash_report(run_id: int, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> None:
     """
     Attempts to rename error.log and error.dmp(crash files) into new names with the timestamp on it.
     :param run_id: editor id that will be used for differentiating paths
@@ -115,13 +116,15 @@ def cycle_crash_report(run_id: int, workspace: AbstractWorkspaceManager) -> None
                 logger.warning(f"Couldn't cycle file {filepath}. Error: {str(ex)}")
 
 
-def retrieve_editor_log_content(run_id: int, log_name: str, workspace: AbstractWorkspaceManager, timeout: int = 10) -> str:
+def retrieve_editor_log_content(run_id: int, log_name: str,
+                                workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
+                                timeout: int = 10) -> str:
     """
     Retrieves the contents of the given editor log file.
     :param run_id: editor id that will be used for differentiating paths
     :log_name: The name of the editor log to retrieve
     :param workspace: Workspace fixture
-    :timeout: Maximum time to wait for the log file to appear
+    :param timeout: Maximum time to wait for the log file to appear
     :return str: The contents of the log
     """
     editor_info = "-- No editor log available --"
@@ -142,12 +145,12 @@ def retrieve_editor_log_content(run_id: int, log_name: str, workspace: AbstractW
     return editor_info
 
 
-def retrieve_last_run_test_index_from_output(test_spec_list: list[EditorTestBase], output: str) -> int:
+def retrieve_last_run_test_index_from_output(test_spec_list: list[EditorTest], output: str) -> int:
     """
     Finds out what was the last test that was run by inspecting the input.
     This is used for determining what was the batched test has crashed the editor
     :param test_spec_list: List of tests that were run in this editor
-    :output: Editor output to inspect
+    :param output: Editor output to inspect
     :return: Index in the given test_spec_list of the last test that ran
     """
     index = -1
@@ -155,14 +158,14 @@ def retrieve_last_run_test_index_from_output(test_spec_list: list[EditorTestBase
     for test_spec in test_spec_list:
         find_pos = output.find(test_spec.__name__, find_pos)
         if find_pos == -1:
-            index = max(index, 0) # <- if we didn't even find the first test, assume its been the first one that crashed
+            index = max(index, 0)  # didn't even find the first test, assume the first one immediately crashed
             return index
         else:
             index += 1
     return index
 
 
-def save_failed_asset_joblogs(workspace: AbstractWorkspace) -> None:
+def save_failed_asset_joblogs(workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager) -> None:
     """
     Checks all asset logs in the JobLogs directory to see if the asset has any warnings or errors. If so, the asset is
     saved via ArtifactManager.
@@ -201,6 +204,6 @@ def _check_log_errors_warnings(log_path: str) -> bool:
             if regex_match is not None:
                 break
     # If we match any non zero numbers in: n error, n warnings
-    if regex_match is None or (int)(regex_match.group(1)) != 0 or (int)(regex_match.group(2)) != 0:
+    if regex_match is None or int(regex_match.group(1)) != 0 or int(regex_match.group(2)) != 0:
         return True
     return False
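
A brief illustration of the retyped module helpers above; any object with a __file__ attribute works, and the standard-library types module is used here purely as a stand-in for a test-case module:

import types

import ly_test_tools.o3de.editor_test_utils as editor_utils

# Always resolves to the '.py' source path, even when __file__ points at a compiled '.pyc'.
print(editor_utils.get_testcase_module_filepath(types))
# Filename without package directories or extension, e.g. 'types'.
print(editor_utils.get_module_filename(types))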

+ 12 - 8
Tools/LyTestTools/tests/unit/test_o3de_editor_test.py

@@ -16,7 +16,7 @@ import ly_test_tools.launchers.exceptions
 pytestmark = pytest.mark.SUITE_smoke
 
 
-class TestEditorTestBase(unittest.TestCase):
+class TestEditorTest(unittest.TestCase):
 
     def test_EditorSharedTest_Init_CorrectAttributes(self):
         mock_editorsharedtest = editor_test.EditorSharedTest()
@@ -34,10 +34,14 @@ class TestEditorTestBase(unittest.TestCase):
         assert not mock_editorsharedtest.is_parallelizable
 
 
-class TestResultBase(unittest.TestCase):
+class TestResultType(unittest.TestCase):
+
+    class DummySubclass(editor_test.Result.ResultType):
+        def __str__(self):
+            return None
 
     def setUp(self):
-        self.mock_result = editor_test.Result.Base()
+        self.mock_result = TestResultType.DummySubclass()
 
     def test_GetOutputStr_HasOutput_ReturnsCorrectly(self):
         self.mock_result.output = 'expected output'
@@ -304,7 +308,7 @@ class TestEditorTestSuite(unittest.TestCase):
         assert mock_get_shared_tests.called
         assert mock_filter_session.called
 
-    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock())
+    @mock.patch('ly_test_tools.o3de.editor_test.skip_pytest_runtest_setup', mock.MagicMock())
     def test_FilterSessionSharedTests_OneSharedTest_ReturnsOne(self):
         def mock_test():
             pass
@@ -317,7 +321,7 @@ class TestEditorTestSuite(unittest.TestCase):
         assert selected_tests == mock_session_items
         assert len(selected_tests) == 1
 
-    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock())
+    @mock.patch('ly_test_tools.o3de.editor_test.skip_pytest_runtest_setup', mock.MagicMock())
     def test_FilterSessionSharedTests_ManyTests_ReturnsCorrectTests(self):
         def mock_test():
             pass
@@ -337,7 +341,7 @@ class TestEditorTestSuite(unittest.TestCase):
         selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
         assert selected_tests == mock_session_items
 
-    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup')
+    @mock.patch('ly_test_tools.o3de.editor_test.skip_pytest_runtest_setup')
     def test_FilterSessionSharedTests_SkipOneTest_ReturnsCorrectTests(self, mock_skip):
         def mock_test():
             pass
@@ -358,7 +362,7 @@ class TestEditorTestSuite(unittest.TestCase):
         selected_tests = editor_test.EditorTestSuite.filter_session_shared_tests(mock_session_items, mock_shared_tests)
         assert selected_tests == [mock_test]
 
-    @mock.patch('ly_test_tools.o3de.editor_test.skipping_pytest_runtest_setup', mock.MagicMock(side_effect=Exception))
+    @mock.patch('ly_test_tools.o3de.editor_test.skip_pytest_runtest_setup', mock.MagicMock(side_effect=Exception))
     def test_FilterSessionSharedTests_ExceptionDuringSkipSetup_SkipsAddingTest(self):
         def mock_test():
             pass
@@ -426,6 +430,7 @@ class TestUtils(unittest.TestCase):
         assert isinstance(mock_editor_data.asset_processor, ly_test_tools.o3de.asset_processor.AssetProcessor)
         assert mock_start.called
 
+    # TODO: verify whether this test case is actually collected and run
     @mock.patch('ly_test_tools.o3de.asset_processor.AssetProcessor.start')
     @mock.patch('ly_test_tools.environment.process_utils.process_exists')
     @mock.patch('ly_test_tools.o3de.editor_test_utils.kill_all_ly_processes')
@@ -596,7 +601,6 @@ class TestRunningTests(unittest.TestCase):
         assert mock_cycle_crash.called
         assert mock_editor.start.called
 
-
     @mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_results_using_output')
     @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_editor_log_content')
     @mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
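
A short sketch of why TestResultType now needs the concrete DummySubclass: with __str__ declared abstract, Result.ResultType itself can no longer be instantiated (assumes LyTestTools is importable):

import ly_test_tools.o3de.editor_test as editor_test

try:
    editor_test.Result.ResultType()
except TypeError as error:
    # e.g. "Can't instantiate abstract class ResultType with abstract method __str__"
    print(error)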