|
@@ -4,11 +4,11 @@ For complete copyright and license terms please see the LICENSE at the root of t
|
|
|
|
|
|
SPDX-License-Identifier: Apache-2.0 OR MIT
|
|
|
|
|
|
-Simplified O3DE Editor test-writing utilities.
|
|
|
+Test-writing utilities that simplify creating O3DE in-Editor tests in Python.
|
|
|
|
|
|
-Test writers should subclass a test suite from EditorTestSuite for easy specifcation of python test scripts for
|
|
|
-the editor to run. Tests can be parallelized (run in multiple editor instances at once) and/or batched (multiple tests
|
|
|
-run in the same editor instance), with collated results and crash detection.
|
|
|
+Test writers should subclass a test suite from EditorTestSuite to hold the specification of python test scripts for
|
|
|
+the editor to load and run. Tests can be parallelized (run in multiple editor instances at once) and/or batched
|
|
|
+(multiple tests run in the same editor instance), with collated results and crash detection.
|
|
|
|
|
|
Usage example:
|
|
|
class MyTestSuite(EditorTestSuite):
|
|
@@ -24,13 +24,6 @@ Usage example:
|
|
|
"""
|
|
|
from __future__ import annotations
|
|
|
|
|
|
-import tempfile
|
|
|
-
|
|
|
-import pytest
|
|
|
-import _pytest.python
|
|
|
-import _pytest.outcomes
|
|
|
-from _pytest.skipping import pytest_runtest_setup as skipping_pytest_runtest_setup
|
|
|
-
|
|
|
import abc
|
|
|
import functools
|
|
|
import inspect
|
|
@@ -38,42 +31,46 @@ import json
|
|
|
import logging
|
|
|
import math
|
|
|
import os
|
|
|
+import pytest
|
|
|
import re
|
|
|
+import tempfile
|
|
|
import threading
|
|
|
import types
|
|
|
import warnings
|
|
|
|
|
|
+import _pytest.python
|
|
|
+import _pytest.outcomes
|
|
|
+from _pytest.skipping import pytest_runtest_setup as skip_pytest_runtest_setup
|
|
|
+
|
|
|
import ly_test_tools.environment.process_utils as process_utils
|
|
|
import ly_test_tools.o3de.editor_test_utils as editor_utils
|
|
|
-import ly_test_tools._internal.pytest_plugin.test_tools_fixtures
|
|
|
-
|
|
|
from ly_test_tools.o3de.asset_processor import AssetProcessor
|
|
|
from ly_test_tools.launchers.exceptions import WaitTimeoutError
|
|
|
|
|
|
-# This file contains ready-to-use test functions which are not actual tests, avoid pytest collection
|
|
|
-__test__ = False
|
|
|
+__test__ = False  # This file contains ready-to-use test functions which are not actual tests; avoid pytest collection
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
-class EditorTestBase(abc.ABC):
|
|
|
+class EditorTest(abc.ABC):
|
|
|
"""
|
|
|
- Abstract Editor Test
|
|
|
+ Abstract Test targeting the O3DE Editor. The following attributes can be overridden by the test writer:
|
|
|
"""
|
|
|
- # Maximum time for run, in seconds
|
|
|
- timeout = 180
|
|
|
- # Test file that this test will run
|
|
|
+    # Test file that this test will run; must be set by the user for the test to execute
|
|
|
test_module = None
|
|
|
- # Attach debugger when running the test, useful for debugging crashes. This should never be True on production.
|
|
|
- # It's also recommended to switch to EditorSingleTest for debugging in isolation
|
|
|
+ # Maximum time to allow the editor to run, in seconds
|
|
|
+ timeout = 180
|
|
|
+ # Attach debugger when running the test, useful for debugging crashes. Should never be left True in production.
|
|
|
+ # Where possible, also recommended to switch to EditorSingleTest for better debugging in isolation.
|
|
|
attach_debugger = False
|
|
|
# Wait until a debugger is attached at the startup of the test, this is another way of debugging.
|
|
|
wait_for_debugger = False
|
|
|
|
|
|
|
|
|
-class EditorSingleTest(EditorTestBase):
|
|
|
+class EditorSingleTest(EditorTest):
|
|
|
"""
|
|
|
- Test that will be run alone in one editor, with no parallel editors
|
|
|
+ Test that will run alone in one editor with no parallel editors, limiting environmental side-effects at the
|
|
|
+ expense of redundant isolated work
|
|
|
"""
|
|
|
def __init__(self):
|
|
|
# Extra cmdline arguments to supply to the editor for the test
|
|
@@ -82,31 +79,59 @@ class EditorSingleTest(EditorTestBase):
|
|
|
self.use_null_renderer = None
|
|
|
|
|
|
@staticmethod
|
|
|
- def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
|
|
|
+ def setup(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
|
|
|
+ workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
+ editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
|
|
|
+ launcher_platform: str) -> None:
|
|
|
"""
|
|
|
- User-overrideable setup function, which will run before the test
|
|
|
+ User-overrideable setup function, which will run before the test.
|
|
|
+ :param instance: Parent EditorTestClass instance executing the test
|
|
|
+ :param request: PyTest request object
|
|
|
+ :param workspace: LyTestTools workspace manager
|
|
|
+ :param editor: LyTestTools editor-launcher object
|
|
|
+ :param editor_test_results: Currently recorded EditorTest results
|
|
|
+ :param launcher_platform: user-parameterized string for LyTestTools
|
|
|
"""
|
|
|
pass
|
|
|
|
|
|
@staticmethod
|
|
|
- def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
|
|
|
+ def wrap_run(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
|
|
|
+ workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
+ editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
|
|
|
+ launcher_platform: str) -> None:
|
|
|
"""
|
|
|
- User-overrideable wrapper function, which will run before and after test.
|
|
|
+ User-overrideable wrapper function, which will run both before and after test.
|
|
|
         Code before the 'yield' statement runs before the test; code after it runs after the test.
|
|
|
+ Setup will run before wrap_run starts. Teardown will run after it completes.
|
|
|
+ :param instance: Parent EditorTestClass instance executing the test
|
|
|
+ :param request: PyTest request object
|
|
|
+ :param workspace: LyTestTools workspace manager
|
|
|
+ :param editor: LyTestTools editor-launcher object
|
|
|
+ :param editor_test_results: Currently recorded EditorTest results
|
|
|
+ :param launcher_platform: user-parameterized string for LyTestTools
|
|
|
"""
|
|
|
yield
|
|
|
|
|
|
@staticmethod
|
|
|
- def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
|
|
|
+ def teardown(instance: EditorTestSuite.EditorTestClass, request: _pytest.fixtures.FixtureRequest,
|
|
|
+ workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
+ editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_results: EditorTestSuite.TestData,
|
|
|
+ launcher_platform: str) -> None:
|
|
|
"""
|
|
|
User-overrideable teardown function, which will run after the test
|
|
|
+ :param instance: Parent EditorTestClass instance executing the test
|
|
|
+ :param request: PyTest request object
|
|
|
+ :param workspace: LyTestTools workspace manager
|
|
|
+ :param editor: LyTestTools editor-launcher object
|
|
|
+ :param editor_test_results: Currently recorded EditorTest results
|
|
|
+ :param launcher_platform: user-parameterized string for LyTestTools
|
|
|
"""
|
|
|
pass
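As a rough sketch of how these hooks fit together, a hypothetical suite could override them as follows (the suite name and the my_editor_script module are assumptions for illustration, not part of this change, and assume the module is importable as ly_test_tools.o3de.editor_test):

    from ly_test_tools.o3de.editor_test import EditorTestSuite, EditorSingleTest

    class MyTestSuiteWithHooks(EditorTestSuite):

        class MyIsolatedTest(EditorSingleTest):
            from . import my_editor_script as test_module  # hypothetical in-Editor script

            @staticmethod
            def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
                pass  # runs before the editor launches, e.g. stage level data

            @staticmethod
            def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
                # code before the yield runs after setup(); code after it runs before teardown()
                yield

            @staticmethod
            def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
                pass  # runs after the editor exits, e.g. clean up temporary assets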
|
|
|
|
|
|
|
|
|
-class EditorSharedTest(EditorTestBase):
|
|
|
+class EditorSharedTest(EditorTest):
|
|
|
"""
|
|
|
- Test that will be run in parallel with tests in different editor instances, as well as serially batched with other
|
|
|
+    Test that will run in parallel with tests in different editor instances, as well as serially batched with other
|
|
|
tests in each editor instance. Minimizes total test run duration.
|
|
|
|
|
|
Does not support per test setup/teardown to avoid creating race conditions
|
|
@@ -119,7 +144,7 @@ class EditorSharedTest(EditorTestBase):
|
|
|
|
|
|
class EditorParallelTest(EditorSharedTest):
|
|
|
"""
|
|
|
- Test that will be run in parallel with tests in different editor instances, though not serially batched with other
|
|
|
+ Test that will run in parallel with tests in different editor instances, though not serially batched with other
|
|
|
tests in each editor instance. Reduces total test run duration, while limiting side-effects between tests.
|
|
|
|
|
|
Does not support per test setup/teardown to avoid creating race conditions
|
|
@@ -130,20 +155,29 @@ class EditorParallelTest(EditorSharedTest):
|
|
|
|
|
|
class EditorBatchedTest(EditorSharedTest):
|
|
|
"""
|
|
|
- Test that will be batched along with the other batched tests in the same editor instance, though not executed in
|
|
|
- parallel with other editor instances. Reduces repeated overhead from starting the Editor.
|
|
|
+    Test that will run serially batched with other tests in the same editor instance, though not in parallel with
|
|
|
+ other editor instances. Reduces overhead from starting the Editor, while limiting side-effects between editors.
|
|
|
|
|
|
Does not support per test setup/teardown to avoid creating race conditions
|
|
|
"""
|
|
|
is_batchable = True
|
|
|
is_parallelizable = False
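To make the scheduling trade-offs concrete, here is a minimal sketch of a suite mixing the four flavours (the suite and script module names are hypothetical, and the batchable/parallelizable attributes follow the docstrings above):

    from ly_test_tools.o3de.editor_test import (
        EditorTestSuite, EditorSingleTest, EditorSharedTest, EditorBatchedTest, EditorParallelTest
    )

    class MySchedulingExampleSuite(EditorTestSuite):

        class RunsAlone(EditorSingleTest):              # own editor, one at a time
            from . import isolated_script as test_module

        class RunsBatched(EditorBatchedTest):           # shares a single editor with other batched tests
            from . import batched_script as test_module

        class RunsInParallel(EditorParallelTest):       # own editor, alongside other editor instances
            from . import parallel_script as test_module

        class RunsBatchedAndParallel(EditorSharedTest):  # batched per editor and run across parallel editors
            from . import shared_script as test_module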
|
|
|
|
|
|
+
|
|
|
class Result:
|
|
|
|
|
|
class EditorTestResultException(Exception):
|
|
|
""" Indicates that an unknown result was found during the tests """
|
|
|
|
|
|
- class Base:
|
|
|
+ class ResultType(abc.ABC):
|
|
|
+ """
|
|
|
+ Generic result-type for data shared among results
|
|
|
+ """
|
|
|
+ @abc.abstractmethod
|
|
|
+ def __str__(self):
|
|
|
+            # type: () -> str
|
|
|
+ return ""
|
|
|
+
|
|
|
def get_output_str(self):
|
|
|
# type () -> str
|
|
|
"""
|
|
@@ -155,7 +189,7 @@ class Result:
|
|
|
return output
|
|
|
else:
|
|
|
return "-- No output --"
|
|
|
-
|
|
|
+
|
|
|
def get_editor_log_str(self):
|
|
|
# type () -> str
|
|
|
"""
|
|
@@ -168,12 +202,12 @@ class Result:
|
|
|
else:
|
|
|
return "-- No editor log found --"
|
|
|
|
|
|
- class Pass(Base):
|
|
|
+ class Pass(ResultType):
|
|
|
|
|
|
- def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
|
|
|
+ def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
|
|
|
"""
|
|
|
Represents a test success
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:output: The test output
|
|
|
:editor_log: The editor log's output
|
|
|
"""
|
|
@@ -191,12 +225,12 @@ class Result:
|
|
|
)
|
|
|
return output
|
|
|
|
|
|
- class Fail(Base):
|
|
|
+ class Fail(ResultType):
|
|
|
|
|
|
- def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
|
|
|
+ def __init__(self, test_spec: type(EditorTest), output: str, editor_log: str):
|
|
|
"""
|
|
|
Represents a normal test failure
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:output: The test output
|
|
|
:editor_log: The editor log's output
|
|
|
"""
|
|
@@ -218,13 +252,13 @@ class Result:
|
|
|
)
|
|
|
return output
|
|
|
|
|
|
- class Crash(Base):
|
|
|
+ class Crash(ResultType):
|
|
|
|
|
|
- def __init__(self, test_spec: type(EditorTestBase), output: str, ret_code: int, stacktrace: str,
|
|
|
+ def __init__(self, test_spec: type(EditorTest), output: str, ret_code: int, stacktrace: str,
|
|
|
editor_log: str):
|
|
|
"""
|
|
|
Represents a test which failed with an unexpected crash
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:output: The test output
|
|
|
:ret_code: The test's return code
|
|
|
:stacktrace: The test's stacktrace if available
|
|
@@ -255,12 +289,12 @@ class Result:
|
|
|
)
|
|
|
return output
|
|
|
|
|
|
- class Timeout(Base):
|
|
|
+ class Timeout(ResultType):
|
|
|
|
|
|
- def __init__(self, test_spec: type(EditorTestBase), output: str, time_secs: float, editor_log: str):
|
|
|
+ def __init__(self, test_spec: type(EditorTest), output: str, time_secs: float, editor_log: str):
|
|
|
"""
|
|
|
Represents a test which failed due to freezing, hanging, or executing slowly
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:output: The test output
|
|
|
:time_secs: The timeout duration in seconds
|
|
|
:editor_log: The editor log's output
|
|
@@ -285,13 +319,13 @@ class Result:
|
|
|
)
|
|
|
return output
|
|
|
|
|
|
- class Unknown(Base):
|
|
|
+ class Unknown(ResultType):
|
|
|
|
|
|
- def __init__(self, test_spec: type(EditorTestBase), output: str = None, extra_info: str = None,
|
|
|
+ def __init__(self, test_spec: type(EditorTest), output: str = None, extra_info: str = None,
|
|
|
editor_log: str = None):
|
|
|
"""
|
|
|
Represents a failure that the test framework cannot classify
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:output: The test output
|
|
|
:extra_info: Any extra information as a string
|
|
|
:editor_log: The editor log's output
|
|
@@ -320,30 +354,43 @@ class Result:
|
|
|
class EditorTestSuite:
|
|
|
# Extra cmdline arguments to supply for every editor instance for this test suite
|
|
|
global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
|
|
|
- # Tests usually run with no renderer, however some tests require a renderer
|
|
|
+ # Tests usually run with no renderer, however some tests require a renderer and will disable this
|
|
|
use_null_renderer = True
|
|
|
- # Maximum time for a single editor to stay open on a shared test
|
|
|
+ # Maximum time in seconds for a single editor to stay open across the set of shared tests
|
|
|
timeout_editor_shared_test = 300
|
|
|
|
|
|
- # Function to calculate number of editors to run in parallel, this can be overridden by the user
|
|
|
@staticmethod
|
|
|
- def get_number_parallel_editors():
|
|
|
- return 8
|
|
|
-
|
|
|
- _TIMEOUT_CRASH_LOG = 20 # Maximum time (seconds) for waiting for a crash file, in seconds
|
|
|
+ def get_number_parallel_editors() -> int:
|
|
|
+ """
|
|
|
+        Number of editors to run in parallel; this method can be overridden by the user.
|
|
|
+ Note: CLI option '--editors-parallel' takes precedence over class settings.
|
|
|
+ :return: count of parallel editors to run
|
|
|
+ """
|
|
|
+ count = 1
|
|
|
+ found_processors = os.cpu_count()
|
|
|
+ if found_processors:
|
|
|
+ # only schedule on half the cores since the application will also run multithreaded
|
|
|
+ # also compensates for hyperthreaded/clustered/virtual cores inflating this count
|
|
|
+ count = math.floor(found_processors / 2)
|
|
|
+ if count < 1:
|
|
|
+ count = 1
|
|
|
+
|
|
|
+ return count
|
|
|
+
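For instance (a sketch; the suite name is hypothetical), a suite whose tests are unusually memory-hungry could cap the editor count while still deferring to the half-the-cores default, and the '--editors-parallel' CLI option would continue to take precedence:

    from ly_test_tools.o3de.editor_test import EditorTestSuite

    class MyHeavySuite(EditorTestSuite):
        @staticmethod
        def get_number_parallel_editors() -> int:
            # never spawn more than two editors for this suite, regardless of core count
            return min(2, EditorTestSuite.get_number_parallel_editors())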
|
|
|
+ _TIMEOUT_CRASH_LOG = 20 # Maximum time (seconds) for waiting for a crash file to finish being dumped to disk
|
|
|
_TEST_FAIL_RETCODE = 0xF # Return code for test failure
|
|
|
|
|
|
class TestData:
|
|
|
- __test__ = False # Required to tell PyTest to skip collecting this class even though it has "Test" in the name; avoids PyTest warnings.
|
|
|
+ __test__ = False # Tell PyTest to skip collecting this even though it has "Test" in the name; avoids warnings.
|
|
|
|
|
|
def __init__(self):
|
|
|
- self.results = {} # Dict of str(test_spec.__name__) -> Result
|
|
|
+ self.results = {} # Dict of str(test_spec.__name__) -> Result.ResultType
|
|
|
self.asset_processor = None
|
|
|
|
|
|
@pytest.fixture(scope="class")
|
|
|
def editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
|
|
|
"""
|
|
|
- Yields a per-testsuite structure to store the data of each test result and an AssetProcessor object that will be
|
|
|
+ Yields a per-class structure to store the data of each test result and an AssetProcessor object that will be
|
|
|
re-used on the whole suite
|
|
|
:request: The Pytest request object
|
|
|
:yield: The TestData object
|
|
@@ -352,16 +399,19 @@ class EditorTestSuite:
|
|
|
|
|
|
def _editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
|
|
|
"""
|
|
|
- A wrapper function for unit testing of this file to call directly. Do not use in production.
|
|
|
+ A wrapped implementation to simplify unit testing pytest fixtures. Users should not call this directly.
|
|
|
+ :request: The Pytest request object (unused, but always passed by pytest)
|
|
|
+ :yield: The TestData object
|
|
|
"""
|
|
|
test_data = EditorTestSuite.TestData()
|
|
|
- yield test_data
|
|
|
- if test_data.asset_processor:
|
|
|
+ yield test_data # yield to pytest while test-class executes
|
|
|
+ # resumed by pytest after each test-class finishes
|
|
|
+ if test_data.asset_processor: # was assigned an AP to manage
|
|
|
test_data.asset_processor.stop(1)
|
|
|
test_data.asset_processor.teardown()
|
|
|
test_data.asset_processor = None
|
|
|
editor_utils.kill_all_ly_processes(include_asset_processor=True)
|
|
|
- else:
|
|
|
+ else: # do not interfere as a preexisting AssetProcessor may be owned by something else
|
|
|
editor_utils.kill_all_ly_processes(include_asset_processor=False)
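As a rough illustration of the shape of this structure (a sketch only; FakeTest is a stand-in class, not a real EditorTest subclass): the runners store one Result per test, keyed by the test class name, and the generated result functions read those entries back.

    from ly_test_tools.o3de.editor_test import EditorTestSuite, Result

    class FakeTest:  # stand-in for an EditorTest subclass, purely for illustration
        pass

    data = EditorTestSuite.TestData()
    data.results[FakeTest.__name__] = Result.Pass(FakeTest, output="editor output", editor_log="")
    assert isinstance(data.results[FakeTest.__name__], Result.Pass)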
|
|
|
|
|
|
class Runner:
|
|
@@ -401,13 +451,13 @@ class EditorTestSuite:
|
|
|
return spec_impl
|
|
|
|
|
|
# Retrieve the test specs
|
|
|
- single_tests = self.obj.get_single_tests()
|
|
|
+ single_tests = self.obj.get_single_tests()
|
|
|
shared_tests = self.obj.get_shared_tests()
|
|
|
- batched_tests = cls.filter_shared_tests(shared_tests, is_batchable=True)
|
|
|
- parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True)
|
|
|
+ batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=False, is_batchable=True)
|
|
|
+ parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=False)
|
|
|
parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
|
|
|
|
|
|
- # If user provides option to not parallelize/batch the tests, move them into single tests
|
|
|
+ # user can provide CLI option to not parallelize/batch the tests
|
|
|
no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
|
|
|
no_batch = self.config.getoption("--no-editor-batch", default=False)
|
|
|
if no_parallelize:
|
|
@@ -421,82 +471,84 @@ class EditorTestSuite:
|
|
|
parallel_tests += parallel_batched_tests
|
|
|
parallel_batched_tests = []
|
|
|
|
|
|
- # Add the single tests, these will run normally
|
|
|
+ # Add the single tests, these will run separately
|
|
|
for test_spec in single_tests:
|
|
|
name = test_spec.__name__
|
|
|
|
|
|
- def make_test_func(name, test_spec):
|
|
|
+ def make_test_func(inner_test_spec):
|
|
|
@set_marks({"run_type": "run_single"})
|
|
|
def single_run(self, request, workspace, editor, editor_test_data, launcher_platform):
|
|
|
# only single tests are allowed to have setup/teardown, however we can have shared tests that
|
|
|
# were explicitly set as single, for example via cmdline argument override
|
|
|
- is_single_test = issubclass(test_spec, EditorSingleTest)
|
|
|
+ is_single_test = issubclass(inner_test_spec, EditorSingleTest)
|
|
|
if is_single_test:
|
|
|
# Setup step for wrap_run
|
|
|
- wrap = test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
+ wrap = inner_test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
|
|
|
next(wrap, None)
|
|
|
- # Setup step
|
|
|
- test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
+ # Setup step
|
|
|
+ inner_test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
# Run
|
|
|
- self._run_single_test(request, workspace, editor, editor_test_data, test_spec)
|
|
|
+ self._run_single_test(request, workspace, editor, editor_test_data, inner_test_spec)
|
|
|
if is_single_test:
|
|
|
# Teardown
|
|
|
- test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
+ inner_test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
|
|
|
# Teardown step for wrap_run
|
|
|
next(wrap, None)
|
|
|
return single_run
|
|
|
- f = make_test_func(name, test_spec)
|
|
|
+ f = make_test_func(test_spec)
|
|
|
if hasattr(test_spec, "pytestmark"):
|
|
|
f.pytestmark = test_spec.pytestmark
|
|
|
setattr(self.obj, name, f)
|
|
|
|
|
|
- # Add the shared tests, for these we will create a runner class for storing the run information
|
|
|
- # that will be later used for selecting what tests runners will be run
|
|
|
+ # Add the shared tests, with a runner class for storing information from each shared run
|
|
|
runners = []
|
|
|
|
|
|
- def create_runner(name, function, tests):
|
|
|
- runner = EditorTestSuite.Runner(name, function, tests)
|
|
|
+ def create_runner(runner_name, function, tests):
|
|
|
+ target_runner = EditorTestSuite.Runner(runner_name, function, tests)
|
|
|
|
|
|
def make_func():
|
|
|
- @set_marks({"runner": runner, "run_type": "run_shared"})
|
|
|
+ @set_marks({"runner": target_runner, "run_type": "run_shared"})
|
|
|
def shared_run(self, request, workspace, editor, editor_test_data, launcher_platform):
|
|
|
- getattr(self, function.__name__)(request, workspace, editor, editor_test_data, runner.tests)
|
|
|
+ getattr(self, function.__name__)(request, workspace, editor, editor_test_data, target_runner.tests)
|
|
|
+
|
|
|
return shared_run
|
|
|
- setattr(self.obj, name, make_func())
|
|
|
-
|
|
|
- # Add the shared tests results, these just succeed/fail based what happened on the Runner.
|
|
|
- for test_spec in tests:
|
|
|
- def make_func(test_spec):
|
|
|
- @set_marks({"runner": runner, "test_spec": test_spec, "run_type": "result"})
|
|
|
+
|
|
|
+ setattr(self.obj, runner_name, make_func())
|
|
|
+
|
|
|
+            # Add the shared test results, which succeed/fail based on what happened in the Runner.
|
|
|
+ for shared_test_spec in tests:
|
|
|
+ def make_func(inner_test_spec):
|
|
|
+ @set_marks({"runner": target_runner, "test_spec": inner_test_spec, "run_type": "result"})
|
|
|
def result(self, request, workspace, editor, editor_test_data, launcher_platform):
|
|
|
+ result_key = inner_test_spec.__name__
|
|
|
# The runner must have filled the editor_test_data.results dict fixture for this test.
|
|
|
# Hitting this assert could mean if there was an error executing the runner
|
|
|
- if test_spec.__name__ not in editor_test_data.results:
|
|
|
- raise Result.EditorTestResultException(f"No results found for {test_spec.__name__}. "
|
|
|
+ if result_key not in editor_test_data.results:
|
|
|
+ raise Result.EditorTestResultException(f"No results found for {result_key}. "
|
|
|
f"Test may not have ran due to the Editor "
|
|
|
f"shutting down. Check for issues in previous "
|
|
|
f"tests.")
|
|
|
- cls._report_result(test_spec.__name__, editor_test_data.results[test_spec.__name__])
|
|
|
+ cls._report_result(result_key, editor_test_data.results[result_key])
|
|
|
+
|
|
|
return result
|
|
|
-
|
|
|
- result_func = make_func(test_spec)
|
|
|
- if hasattr(test_spec, "pytestmark"):
|
|
|
- result_func.pytestmark = test_spec.pytestmark
|
|
|
- setattr(self.obj, test_spec.__name__, result_func)
|
|
|
- runners.append(runner)
|
|
|
-
|
|
|
+
|
|
|
+ result_func = make_func(shared_test_spec)
|
|
|
+ if hasattr(shared_test_spec, "pytestmark"):
|
|
|
+ result_func.pytestmark = shared_test_spec.pytestmark
|
|
|
+ setattr(self.obj, shared_test_spec.__name__, result_func)
|
|
|
+ runners.append(target_runner)
|
|
|
+
|
|
|
create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
|
|
|
create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
|
|
|
create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
|
|
|
|
|
|
- # Now that we have added all the functions to the class, we will run
|
|
|
- # a class test collection to retrieve all the tests.
|
|
|
- instance = super().collect()[0]
|
|
|
+ # Now that we have added the functions to the class, have pytest retrieve all the tests the class contains
|
|
|
+ pytest_class_instance = super().collect()[0]
|
|
|
|
|
|
# Override the istestfunction for the object, with this we make sure that the
|
|
|
# runners are always collected, even if they don't follow the "test_" naming
|
|
|
- original_istestfunction = instance.istestfunction
|
|
|
+ original_istestfunction = pytest_class_instance.istestfunction
|
|
|
|
|
|
def istestfunction(self, obj, name):
|
|
|
ret = original_istestfunction(obj, name)
|
|
@@ -504,11 +556,11 @@ class EditorTestSuite:
|
|
|
ret = hasattr(obj, "marks")
|
|
|
return ret
|
|
|
|
|
|
- instance.istestfunction = types.MethodType(istestfunction, instance)
|
|
|
- collection = instance.collect()
|
|
|
+ pytest_class_instance.istestfunction = types.MethodType(istestfunction, pytest_class_instance)
|
|
|
+ collection = pytest_class_instance.collect()
|
|
|
|
|
|
- def get_func_run_type(f):
|
|
|
- return getattr(f, "marks", {}).setdefault("run_type", None)
|
|
|
+ def get_func_run_type(function):
|
|
|
+ return getattr(function, "marks", {}).setdefault("run_type", None)
|
|
|
|
|
|
collected_run_pytestfuncs = [
|
|
|
item for item in collection if get_func_run_type(item.obj) == "run_shared"
|
|
@@ -516,18 +568,18 @@ class EditorTestSuite:
|
|
|
collected_result_pytestfuncs = [
|
|
|
item for item in collection if get_func_run_type(item.obj) == "result"
|
|
|
]
|
|
|
- # We'll remove and store the runner functions for later, this way they won't
|
|
|
+ # We'll remove and store the runner functions for later, this way they won't
|
|
|
         # be deselected by any filtering mechanism. The result functions, however, are the ones we want
|
|
|
         # filtered, since they determine the final subset of tests to run
|
|
|
collection = [
|
|
|
item for item in collection if item not in collected_run_pytestfuncs
|
|
|
]
|
|
|
-
|
|
|
- # Match each generated pytestfunctions with every runner and store them
|
|
|
+
|
|
|
+        # Match each generated pytestfunction with its runner and store them
|
|
|
for run_pytestfunc in collected_run_pytestfuncs:
|
|
|
runner = run_pytestfunc.function.marks["runner"]
|
|
|
runner.run_pytestfunc = run_pytestfunc
|
|
|
-
|
|
|
+
|
|
|
for result_pytestfunc in collected_result_pytestfuncs:
|
|
|
runner = result_pytestfunc.function.marks["runner"]
|
|
|
runner.result_pytestfuncs.append(result_pytestfunc)
|
|
@@ -540,7 +592,7 @@ class EditorTestSuite:
|
|
|
return EditorTestSuite.EditorTestClass(name, collector)
|
|
|
|
|
|
@classmethod
|
|
|
- def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTestBase],
|
|
|
+ def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTest],
|
|
|
config: _pytest.config.Config) -> None:
|
|
|
"""
|
|
|
Adds the runners' functions and filters the tests that will run. The runners will be added if they have any
|
|
@@ -574,7 +626,8 @@ class EditorTestSuite:
|
|
|
from . import script_to_be_run_by_editor as test_module
|
|
|
:return: The list of single tests
|
|
|
"""
|
|
|
- single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
|
|
|
+ single_tests = [c[1] for c in cls.__dict__.items()
|
|
|
+ if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
|
|
|
return single_tests
|
|
|
|
|
|
@classmethod
|
|
@@ -587,11 +640,12 @@ class EditorTestSuite:
|
|
|
from . import script_to_be_run_by_editor as test_module
|
|
|
:return: The list of shared tests
|
|
|
"""
|
|
|
- shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
|
|
|
+ shared_tests = [c[1] for c in cls.__dict__.items()
|
|
|
+ if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
|
|
|
return shared_tests
|
|
|
|
|
|
@classmethod
|
|
|
- def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTestBase]:
|
|
|
+ def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTest]:
|
|
|
"""
|
|
|
Filters and returns all of the shared tests in a given session.
|
|
|
:session: The test session
|
|
@@ -601,17 +655,18 @@ class EditorTestSuite:
|
|
|
return cls.filter_session_shared_tests(session, shared_tests)
|
|
|
|
|
|
@staticmethod
|
|
|
- def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTestBase)], shared_tests: list[EditorSharedTest]) -> list[EditorTestBase]:
|
|
|
+ def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTest)],
|
|
|
+ shared_tests: list[EditorSharedTest]) -> list[EditorTest]:
|
|
|
"""
|
|
|
- Retrieve the test sub-set that was collected this can be less than the original set if were overriden via -k
|
|
|
- argument or similars
|
|
|
+ Retrieve the test sub-set that was collected
|
|
|
+ Note that this can be less than the original set if modified, such as via pytest argument -k
|
|
|
:session_items: The tests in a session to run
|
|
|
:shared_tests: All of the shared tests
|
|
|
:return: The list of filtered tests
|
|
|
"""
|
|
|
def will_run(item):
|
|
|
try:
|
|
|
- skipping_pytest_runtest_setup(item)
|
|
|
+ skip_pytest_runtest_setup(item)
|
|
|
return True
|
|
|
except (Warning, Exception, _pytest.outcomes.OutcomeException) as ex:
|
|
|
# intentionally broad to avoid events other than system interrupts
|
|
@@ -627,8 +682,8 @@ class EditorTestSuite:
|
|
|
def filter_shared_tests(shared_tests: list[EditorSharedTest], is_batchable: bool = False,
|
|
|
is_parallelizable: bool = False) -> list[EditorSharedTest]:
|
|
|
"""
|
|
|
- Filters and returns all tests based off of if they are batchable and/or parallelizable
|
|
|
- :shared_tests: All shared tests
|
|
|
+ Filters the provided list of tests on whether they are batchable and/or parallelizable
|
|
|
+ :shared_tests: List of shared tests
|
|
|
:is_batchable: Filter to batchable tests
|
|
|
:is_parallelizable: Filter to parallelizable tests
|
|
|
:return: The list of filtered tests
|
|
@@ -641,7 +696,8 @@ class EditorTestSuite:
|
|
|
)
|
|
|
]
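For example (a sketch assuming the exact-match filtering semantics implied by the calls earlier in this change, and the batchable/parallelizable attributes from the class docstrings; the test classes here are hypothetical):

    from ly_test_tools.o3de.editor_test import (
        EditorTestSuite, EditorSharedTest, EditorBatchedTest, EditorParallelTest
    )

    class OnlyBatched(EditorBatchedTest):        # is_batchable=True,  is_parallelizable=False
        pass

    class OnlyParallel(EditorParallelTest):      # is_batchable=False, is_parallelizable=True
        pass

    class BatchedAndParallel(EditorSharedTest):  # is_batchable=True,  is_parallelizable=True
        pass

    shared = [OnlyBatched, OnlyParallel, BatchedAndParallel]
    assert EditorTestSuite.filter_shared_tests(shared, is_batchable=True) == [OnlyBatched]
    assert EditorTestSuite.filter_shared_tests(shared, is_parallelizable=True) == [OnlyParallel]
    assert EditorTestSuite.filter_shared_tests(shared, is_batchable=True, is_parallelizable=True) == [BatchedAndParallel]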
|
|
|
|
|
|
- def _prepare_asset_processor(self, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
+ @staticmethod
|
|
|
+ def _prepare_asset_processor(workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
editor_test_data: TestData) -> None:
|
|
|
"""
|
|
|
Prepares the asset processor for the test depending on whether or not the process is open and if the current
|
|
@@ -651,17 +707,15 @@ class EditorTestSuite:
|
|
|
:return: None
|
|
|
"""
|
|
|
try:
|
|
|
- # Start-up an asset processor if we are not running one
|
|
|
- # If another AP process exist, don't kill it, as we don't own it
|
|
|
+ # Start-up an asset processor if we are not already managing one
|
|
|
if editor_test_data.asset_processor is None:
|
|
|
if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
|
|
|
editor_utils.kill_all_ly_processes(include_asset_processor=True)
|
|
|
editor_test_data.asset_processor = AssetProcessor(workspace)
|
|
|
editor_test_data.asset_processor.start()
|
|
|
- else:
|
|
|
+ else: # If another AP process already exists, do not kill it as we do not manage it
|
|
|
editor_utils.kill_all_ly_processes(include_asset_processor=False)
|
|
|
- else:
|
|
|
- # Make sure the asset processor from before wasn't closed by accident
|
|
|
+ else: # Make sure existing asset processor wasn't closed by accident
|
|
|
editor_test_data.asset_processor.start()
|
|
|
except Exception as ex:
|
|
|
editor_test_data.asset_processor = None
|
|
@@ -682,7 +736,7 @@ class EditorTestSuite:
|
|
|
editor.configure_settings()
|
|
|
|
|
|
@staticmethod
|
|
|
- def _get_results_using_output(test_spec_list: list[EditorTestBase], output: str, editor_log_content: str) -> dict[str, Result]:
|
|
|
+ def _get_results_using_output(test_spec_list: list[EditorTest], output: str, editor_log_content: str) -> dict[str, Result.ResultType]:
|
|
|
"""
|
|
|
Utility function for parsing the output information from the editor. It deserializes the JSON content printed in
|
|
|
the output for every test and returns that information.
|
|
@@ -700,6 +754,7 @@ class EditorTestSuite:
|
|
|
elem = json.loads(m.groups()[0])
|
|
|
found_jsons[elem["name"]] = elem
|
|
|
except Exception: # Intentionally broad to avoid failing if the output data is corrupt
|
|
|
+                logger.warning("Error reading result JSON", exc_info=True)
|
|
|
continue
|
|
|
|
|
|
# Try to find the element in the log, this is used for cutting the log contents later
|
|
@@ -710,6 +765,7 @@ class EditorTestSuite:
|
|
|
if elem["name"] in found_jsons:
|
|
|
found_jsons[elem["name"]]["log_match"] = m
|
|
|
except Exception: # Intentionally broad, to avoid failing if the log data is corrupt
|
|
|
+                logger.warning("Error matching result JSON in the editor log", exc_info=True)
|
|
|
continue
|
|
|
|
|
|
log_start = 0
|
|
@@ -743,9 +799,9 @@ class EditorTestSuite:
|
|
|
return results
|
|
|
|
|
|
@staticmethod
|
|
|
- def _report_result(name: str, result: Result) -> None:
|
|
|
+ def _report_result(name: str, result: Result.ResultType) -> None:
|
|
|
"""
|
|
|
- Fails the test if the test result is not a PASS, specifying the information
|
|
|
+        Raises a pytest failure if the test result is not a PASS, including the failure information
|
|
|
:name: Name of the test
|
|
|
:result: The Result object which denotes if the test passed or not
|
|
|
:return: None
|
|
@@ -760,18 +816,18 @@ class EditorTestSuite:
|
|
|
def _exec_editor_test(self, request: _pytest.fixtures.FixtureRequest,
|
|
|
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
editor: ly_test_tools.launchers.platforms.base.Launcher,
|
|
|
- run_id: int, log_name: str, test_spec: EditorTestBase,
|
|
|
- cmdline_args: list[str] = None) -> dict[str, Result]:
|
|
|
+ run_id: int, log_name: str, test_spec: EditorTest,
|
|
|
+ cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
|
|
|
"""
|
|
|
- Starts the editor with the given test and retuns an result dict with a single element specifying the result
|
|
|
+ Starts the editor with the given test and returns a result dict with a single element specifying the result
|
|
|
:request: The pytest request
|
|
|
:workspace: The LyTestTools Workspace object
|
|
|
:editor: The LyTestTools Editor object
|
|
|
:run_id: The unique run id
|
|
|
:log_name: The name of the editor log to retrieve
|
|
|
- :test_spec: The type of EditorTestBase
|
|
|
+ :test_spec: The type of EditorTest
|
|
|
:cmdline_args: Any additional command line args
|
|
|
- :return: a dictionary of Result objects
|
|
|
+        :return: a dictionary of Result objects (should contain only one)
|
|
|
"""
|
|
|
if cmdline_args is None:
|
|
|
cmdline_args = []
|
|
@@ -845,8 +901,8 @@ class EditorTestSuite:
|
|
|
def _exec_editor_multitest(self, request: _pytest.fixtures.FixtureRequest,
|
|
|
workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
|
|
|
editor: ly_test_tools.launchers.platforms.base.Launcher, run_id: int, log_name: str,
|
|
|
- test_spec_list: list[EditorTestBase],
|
|
|
- cmdline_args: list[str] = None) -> dict[str, Result]:
|
|
|
+ test_spec_list: list[EditorTest],
|
|
|
+ cmdline_args: list[str] = None) -> dict[str, Result.ResultType]:
|
|
|
"""
|
|
|
Starts an editor executable with a list of tests and returns a dict of the result of every test ran within that
|
|
|
editor instance. In case of failure this function also parses the editor output to find out what specific tests
|
|
@@ -856,7 +912,7 @@ class EditorTestSuite:
|
|
|
:editor: The LyTestTools Editor object
|
|
|
:run_id: The unique run id
|
|
|
:log_name: The name of the editor log to retrieve
|
|
|
- :test_spec_list: A list of EditorTestBase tests to run in the same editor instance
|
|
|
+ :test_spec_list: A list of EditorTest tests to run in the same editor instance
|
|
|
:cmdline_args: Any additional command line args
|
|
|
:return: A dict of Result objects
|
|
|
"""
|
|
@@ -1194,8 +1250,8 @@ class EditorTestSuite:
|
|
|
|
|
|
def _get_number_parallel_editors(self, request: _pytest.fixtures.FixtureRequest) -> int:
|
|
|
"""
|
|
|
- Retrieves the number of parallel preference cmdline overrides
|
|
|
- :request: The Pytest Request
|
|
|
+        Retrieves the preferred number of parallel editors based on the cmdline override, class override, or default
|
|
|
+ :request: The Pytest Request object
|
|
|
:return: The number of parallel editors to use
|
|
|
"""
|
|
|
parallel_editors_value = request.config.getoption("--editors-parallel", None)
|