editor_test.py

  1. """
  2. Copyright (c) Contributors to the Open 3D Engine Project.
  3. For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. SPDX-License-Identifier: Apache-2.0 OR MIT
  5. Simplified O3DE Editor test-writing utilities.
  6. Test writers should subclass a test suite from EditorTestSuite for easy specification of Python test scripts for
  7. the editor to run. Tests can be parallelized (run in multiple editor instances at once) and/or batched (multiple tests
  8. run in the same editor instance), with collated results and crash detection.
  9. Usage example:
  10.     class MyTestSuite(EditorTestSuite):
  11.         class MyFirstTest(EditorSingleTest):
  12.             from . import script_to_be_run_by_editor as test_module
  13.         class MyTestInParallel_1(EditorParallelTest):
  14.             from . import another_script_to_be_run_by_editor as test_module
  15.         class MyTestInParallel_2(EditorParallelTest):
  16.             from . import yet_another_script_to_be_run_by_editor as test_module
  17. """
  18. from __future__ import annotations
  19. import pytest
  20. import _pytest.python
  21. import _pytest.outcomes
  22. from _pytest.skipping import pytest_runtest_setup as skipping_pytest_runtest_setup
  23. import abc
  24. import functools
  25. import inspect
  26. import json
  27. import logging
  28. import math
  29. import os
  30. import re
  31. import threading
  32. import types
  33. import warnings
  34. import ly_test_tools.environment.process_utils as process_utils
  35. import ly_test_tools.o3de.editor_test_utils as editor_utils
  36. import ly_test_tools._internal.pytest_plugin.test_tools_fixtures
  37. from ly_test_tools.o3de.asset_processor import AssetProcessor
  38. from ly_test_tools.launchers.exceptions import WaitTimeoutError
  39. # This file contains ready-to-use test helpers which are not actual tests; prevent pytest collection
  40. __test__ = False
  41. logger = logging.getLogger(__name__)
  42. class EditorTestBase(abc.ABC):
  43. """
  44. Abstract Editor Test
  45. """
  46. # Maximum time for run, in seconds
  47. timeout = 180
  48. # Test file that this test will run
  49. test_module = None
  51. # Attach debugger when running the test, useful for debugging crashes. This should never be True in production.
  51. # It's also recommended to switch to EditorSingleTest for debugging in isolation
  52. attach_debugger = False
  54. # Wait until a debugger is attached at the startup of the test; this is another way of debugging.
  54. wait_for_debugger = False
  55. class EditorSingleTest(EditorTestBase):
  56. """
  57. Test that will be run alone in one editor, with no parallel editors
  58. """
  59. def __init__(self):
  60. # Extra cmdline arguments to supply to the editor for the test
  61. self.extra_cmdline_args = []
  62. # Whether to use the null renderer; if not None, this overrides use_null_renderer for the suite
  63. self.use_null_renderer = None
  64. @staticmethod
  65. def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
  66. """
  67. User-overrideable setup function, which will run before the test
  68. """
  69. pass
  70. @staticmethod
  71. def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
  72. """
  73. User-overrideable wrapper function, which will run before and after test.
  74. Any code before the 'yield' statement will run before the test; code after the yield runs after the test.
  75. """
  76. yield
  77. @staticmethod
  78. def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
  79. """
  80. User-overrideable teardown function, which will run after the test
  81. """
  82. pass
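# Hedged sketch of a single test using the per-test hooks above; 'my_script' is a
# hypothetical module and the hook bodies are placeholders:
#
#     class MyTestWithHooks(EditorSingleTest):
#         from . import my_script as test_module
#
#         @staticmethod
#         def setup(instance, request, workspace, editor, editor_test_results, launcher_platform):
#             pass  # runs before the editor test
#
#         @staticmethod
#         def wrap_run(instance, request, workspace, editor, editor_test_results, launcher_platform):
#             # code before the yield runs before the test, code after it runs afterwards
#             yield
#
#         @staticmethod
#         def teardown(instance, request, workspace, editor, editor_test_results, launcher_platform):
#             pass  # runs after the editor test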
  83. class EditorSharedTest(EditorTestBase):
  84. """
  85. Test that will be run in parallel with tests in different editor instances, as well as serially batched with other
  86. tests in each editor instance. Minimizes total test run duration.
  87. Does not support per test setup/teardown to avoid creating race conditions
  88. """
  89. # Specifies if the test can be batched in the same editor
  90. is_batchable = True
  91. # Specifies if the test can be run in multiple editors in parallel
  92. is_parallelizable = True
  93. class EditorParallelTest(EditorSharedTest):
  94. """
  95. Test that will be run in parallel with tests in different editor instances, though not serially batched with other
  96. tests in each editor instance. Reduces total test run duration, while limiting side-effects between tests.
  97. Does not support per test setup/teardown to avoid creating race conditions
  98. """
  99. is_batchable = False
  100. is_parallelizable = True
  101. class EditorBatchedTest(EditorSharedTest):
  102. """
  103. Test that will be batched along with the other batched tests in the same editor instance, though not executed in
  104. parallel with other editor instances. Reduces repeated overhead from starting the Editor.
  105. Does not support per test setup/teardown to avoid creating race conditions
  106. """
  107. is_batchable = True
  108. is_parallelizable = False
  109. class Result:
  110. class Base:
  111. def get_output_str(self):
  112. # type: () -> str
  113. """
  114. Checks if the output attribute exists and returns it.
  115. :return: Output string from running a test, or a no output message
  116. """
  117. output = getattr(self, "output", None)
  118. if output:
  119. return output
  120. else:
  121. return "-- No output --"
  122. def get_editor_log_str(self):
  123. # type: () -> str
  124. """
  125. Checks if the editor_log attribute exists and returns it.
  126. :return: Either the editor_log string or a no output message
  127. """
  128. log = getattr(self, "editor_log", None)
  129. if log:
  130. return log
  131. else:
  132. return "-- No editor log found --"
  133. class Pass(Base):
  134. def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
  135. """
  136. Represents a test success
  137. :test_spec: The type of EditorTestBase
  138. :output: The test output
  139. :editor_log: The editor log's output
  140. """
  141. self.test_spec = test_spec
  142. self.output = output
  143. self.editor_log = editor_log
  144. def __str__(self):
  145. output = (
  146. f"Test Passed\n"
  147. f"------------\n"
  148. f"| Output |\n"
  149. f"------------\n"
  150. f"{self.get_output_str()}\n"
  151. )
  152. return output
  153. class Fail(Base):
  154. def __init__(self, test_spec: type(EditorTestBase), output: str, editor_log: str):
  155. """
  156. Represents a normal test failure
  157. :test_spec: The type of EditorTestBase
  158. :output: The test output
  159. :editor_log: The editor log's output
  160. """
  161. self.test_spec = test_spec
  162. self.output = output
  163. self.editor_log = editor_log
  164. def __str__(self):
  165. output = (
  166. f"Test FAILED\n"
  167. f"------------\n"
  168. f"| Output |\n"
  169. f"------------\n"
  170. f"{self.get_output_str()}\n"
  171. f"--------------\n"
  172. f"| Editor log |\n"
  173. f"--------------\n"
  174. f"{self.get_editor_log_str()}\n"
  175. )
  176. return output
  177. class Crash(Base):
  178. def __init__(self, test_spec: type(EditorTestBase), output: str, ret_code: int, stacktrace: str,
  179. editor_log: str):
  180. """
  181. Represents a test which failed with an unexpected crash
  182. :test_spec: The type of EditorTestBase
  183. :output: The test output
  184. :ret_code: The test's return code
  185. :stacktrace: The test's stacktrace if available
  186. :editor_log: The editor log's output
  187. """
  188. self.output = output
  189. self.test_spec = test_spec
  190. self.ret_code = ret_code
  191. self.stacktrace = stacktrace
  192. self.editor_log = editor_log
  193. def __str__(self):
  194. stacktrace_str = "-- No stacktrace data found --" if not self.stacktrace else self.stacktrace
  195. output = (
  196. f"Test CRASHED, return code {hex(self.ret_code)}\n"
  197. f"---------------\n"
  198. f"| Stacktrace |\n"
  199. f"---------------\n"
  200. f"{stacktrace_str}"
  201. f"------------\n"
  202. f"| Output |\n"
  203. f"------------\n"
  204. f"{self.get_output_str()}\n"
  205. f"--------------\n"
  206. f"| Editor log |\n"
  207. f"--------------\n"
  208. f"{self.get_editor_log_str()}\n"
  209. )
  210. return output
  211. class Timeout(Base):
  212. def __init__(self, test_spec: type(EditorTestBase), output: str, time_secs: float, editor_log: str):
  213. """
  214. Represents a test which failed due to freezing, hanging, or executing slowly
  215. :test_spec: The type of EditorTestBase
  216. :output: The test output
  217. :time_secs: The timeout duration in seconds
  218. :editor_log: The editor log's output
  219. :return: The Timeout object
  220. """
  221. self.output = output
  222. self.test_spec = test_spec
  223. self.time_secs = time_secs
  224. self.editor_log = editor_log
  225. def __str__(self):
  226. output = (
  227. f"Test ABORTED after not completing within {self.time_secs} seconds\n"
  228. f"------------\n"
  229. f"| Output |\n"
  230. f"------------\n"
  231. f"{self.get_output_str()}\n"
  232. f"--------------\n"
  233. f"| Editor log |\n"
  234. f"--------------\n"
  235. f"{self.get_editor_log_str()}\n"
  236. )
  237. return output
  238. class Unknown(Base):
  239. def __init__(self, test_spec: type(EditorTestBase), output: str = None, extra_info: str = None,
  240. editor_log: str = None):
  241. """
  242. Represents a failure that the test framework cannot classify
  243. :test_spec: The type of EditorTestBase
  244. :output: The test output
  245. :extra_info: Any extra information as a string
  246. :editor_log: The editor log's output
  247. """
  248. self.output = output
  249. self.test_spec = test_spec
  250. self.editor_log = editor_log
  251. self.extra_info = extra_info
  252. def __str__(self):
  253. output = (
  254. f"Indeterminate test result interpreted as failure, possible cause: {self.extra_info}\n"
  255. f"------------\n"
  256. f"| Output |\n"
  257. f"------------\n"
  258. f"{self.get_output_str()}\n"
  259. f"--------------\n"
  260. f"| Editor log |\n"
  261. f"--------------\n"
  262. f"{self.get_editor_log_str()}\n"
  263. )
  264. return output
  265. @pytest.mark.parametrize("crash_log_watchdog", [("raise_on_crash", False)])
  266. class EditorTestSuite:
  267. # Extra cmdline arguments to supply for every editor instance for this test suite
  268. global_extra_cmdline_args = ["-BatchMode", "-autotest_mode"]
  269. # Tests usually run with no renderer, however some tests require a renderer
  270. use_null_renderer = True
  271. # Maximum time for a single editor to stay open on a shared test
  272. timeout_editor_shared_test = 300
  273. # Flag to determine whether to use new prefab system or use deprecated slice system for this test suite
  274. enable_prefab_system = True
  275. # Function to calculate number of editors to run in parallel, this can be overridden by the user
  276. @staticmethod
  277. def get_number_parallel_editors():
  278. return 8
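# Hedged sketch: a suite may cap concurrency by overriding the hook above, e.g.
#
#     class MyLowConcurrencySuite(EditorTestSuite):
#         @staticmethod
#         def get_number_parallel_editors():
#             return 2  # run at most two editors side by side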
  279. _TIMEOUT_CRASH_LOG = 20  # Maximum time (in seconds) to wait for a crash file
  280. _TEST_FAIL_RETCODE = 0xF # Return code for test failure
  281. class TestData:
  282. __test__ = False # Required to tell PyTest to skip collecting this class even though it has "Test" in the name; avoids PyTest warnings.
  283. def __init__(self):
  284. self.results = {} # Dict of str(test_spec.__name__) -> Result
  285. self.asset_processor = None
  286. @pytest.fixture(scope="class")
  287. def editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
  288. """
  289. Yields a per-testsuite structure to store the data of each test result and an AssetProcessor object that will be
  290. re-used on the whole suite
  291. :request: The Pytest request object
  292. :yield: The TestData object
  293. """
  294. yield from self._editor_test_data(request)
  295. def _editor_test_data(self, request: _pytest.fixtures.FixtureRequest) -> EditorTestSuite.TestData:
  296. """
  297. A wrapper function for unit testing of this file to call directly. Do not use in production.
  298. """
  299. test_data = EditorTestSuite.TestData()
  300. yield test_data
  301. if test_data.asset_processor:
  302. test_data.asset_processor.stop(1)
  303. test_data.asset_processor.teardown()
  304. test_data.asset_processor = None
  305. editor_utils.kill_all_ly_processes(include_asset_processor=True)
  306. else:
  307. editor_utils.kill_all_ly_processes(include_asset_processor=False)
  308. class Runner:
  309. def __init__(self, name, func, tests):
  310. self.name = name
  311. self.func = func
  312. self.tests = tests
  313. self.run_pytestfunc = None
  314. self.result_pytestfuncs = []
  315. class EditorTestClass(pytest.Class):
  316. """
  317. Custom pytest collector which programmatically adds test functions based on data in the TestSuite class
  318. """
  319. def collect(self):
  320. """
  321. This collector does the following:
  322. 1) Iterates through all the EditorSingleTest subclasses defined inside the suite.
  323. Adds a test function to the suite to run each separately, and report results
  324. 2) Iterates through all the EditorSharedTest subclasses defined inside the suite,
  325. grouping tests by their specs into 3 categories: batched, parallel and batched+parallel.
  326. Each category gets a single test runner function registered to run all the tests of the category
  327. A result function will be added for every individual test, which will pass/fail based on the results
  328. from the previously executed runner function
  329. """
  330. cls = self.obj
  331. # Decorator function to add extra lookup information for the test functions
  332. def set_marks(marks):
  333. def spec_impl(func):
  334. @functools.wraps(func)
  335. def inner(*args, **argv):
  336. return func(*args, **argv)
  337. inner.marks = marks
  338. return inner
  339. return spec_impl
  340. # Retrieve the test specs
  341. single_tests = self.obj.get_single_tests()
  342. shared_tests = self.obj.get_shared_tests()
  343. batched_tests = cls.filter_shared_tests(shared_tests, is_batchable=True)
  344. parallel_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True)
  345. parallel_batched_tests = cls.filter_shared_tests(shared_tests, is_parallelizable=True, is_batchable=True)
  346. # If user provides option to not parallelize/batch the tests, move them into single tests
  347. no_parallelize = self.config.getoption("--no-editor-parallel", default=False)
  348. no_batch = self.config.getoption("--no-editor-batch", default=False)
  349. if no_parallelize:
  350. single_tests += parallel_tests
  351. parallel_tests = []
  352. batched_tests += parallel_batched_tests
  353. parallel_batched_tests = []
  354. if no_batch:
  355. single_tests += batched_tests
  356. batched_tests = []
  357. parallel_tests += parallel_batched_tests
  358. parallel_batched_tests = []
  359. # Add the single tests, these will run normally
  360. for test_spec in single_tests:
  361. name = test_spec.__name__
  362. def make_test_func(name, test_spec):
  363. @set_marks({"run_type": "run_single"})
  364. def single_run(self, request, workspace, editor, editor_test_data, launcher_platform):
  365. # only single tests are allowed to have setup/teardown, however we can have shared tests that
  366. # were explicitly set as single, for example via cmdline argument override
  367. is_single_test = issubclass(test_spec, EditorSingleTest)
  368. if is_single_test:
  369. # Setup step for wrap_run
  370. wrap = test_spec.wrap_run(self, request, workspace, editor, editor_test_data, launcher_platform)
  371. assert isinstance(wrap, types.GeneratorType), "wrap_run must return a generator, did you forget 'yield'?"
  372. next(wrap, None)
  373. # Setup step
  374. test_spec.setup(self, request, workspace, editor, editor_test_data, launcher_platform)
  375. # Run
  376. self._run_single_test(request, workspace, editor, editor_test_data, test_spec)
  377. if is_single_test:
  378. # Teardown
  379. test_spec.teardown(self, request, workspace, editor, editor_test_data, launcher_platform)
  380. # Teardown step for wrap_run
  381. next(wrap, None)
  382. return single_run
  383. f = make_test_func(name, test_spec)
  384. if hasattr(test_spec, "pytestmark"):
  385. f.pytestmark = test_spec.pytestmark
  386. setattr(self.obj, name, f)
  387. # Add the shared tests, for these we will create a runner class for storing the run information
  388. # that will be later used for selecting what tests runners will be run
  389. runners = []
  390. def create_runner(name, function, tests):
  391. runner = EditorTestSuite.Runner(name, function, tests)
  392. def make_func():
  393. @set_marks({"runner": runner, "run_type": "run_shared"})
  394. def shared_run(self, request, workspace, editor, editor_test_data, launcher_platform):
  395. getattr(self, function.__name__)(request, workspace, editor, editor_test_data, runner.tests)
  396. return shared_run
  397. setattr(self.obj, name, make_func())
  398. # Add the shared tests' results; these just succeed/fail based on what happened in the Runner.
  399. for test_spec in tests:
  400. def make_func(test_spec):
  401. @set_marks({"runner": runner, "test_spec": test_spec, "run_type": "result"})
  402. def result(self, request, workspace, editor, editor_test_data, launcher_platform):
  403. # The runner must have filled the editor_test_data.results dict fixture for this test.
  404. # Hitting this assert usually means there was an error executing the runner
  405. assert test_spec.__name__ in editor_test_data.results, f"No run data for test: {test_spec.__name__}."
  406. cls._report_result(test_spec.__name__, editor_test_data.results[test_spec.__name__])
  407. return result
  408. result_func = make_func(test_spec)
  409. if hasattr(test_spec, "pytestmark"):
  410. result_func.pytestmark = test_spec.pytestmark
  411. setattr(self.obj, test_spec.__name__, result_func)
  412. runners.append(runner)
  413. create_runner("run_batched_tests", cls._run_batched_tests, batched_tests)
  414. create_runner("run_parallel_tests", cls._run_parallel_tests, parallel_tests)
  415. create_runner("run_parallel_batched_tests", cls._run_parallel_batched_tests, parallel_batched_tests)
  416. # Now that we have added all the functions to the class, we will run
  417. # a class test collection to retrieve all the tests.
  418. instance = super().collect()[0]
  419. # Override the istestfunction for the object, with this we make sure that the
  420. # runners are always collected, even if they don't follow the "test_" naming
  421. original_istestfunction = instance.istestfunction
  422. def istestfunction(self, obj, name):
  423. ret = original_istestfunction(obj, name)
  424. if not ret:
  425. ret = hasattr(obj, "marks")
  426. return ret
  427. instance.istestfunction = types.MethodType(istestfunction, instance)
  428. collection = instance.collect()
  429. def get_func_run_type(f):
  430. return getattr(f, "marks", {}).setdefault("run_type", None)
  431. collected_run_pytestfuncs = [
  432. item for item in collection if get_func_run_type(item.obj) == "run_shared"
  433. ]
  434. collected_result_pytestfuncs = [
  435. item for item in collection if get_func_run_type(item.obj) == "result"
  436. ]
  437. # We'll remove and store the runner functions for later; this way they won't
  438. # be deselected by any filtering mechanism. The result functions, however, we do want
  439. # to be filtered, since they tell us the final subset of tests to run
  440. collection = [
  441. item for item in collection if item not in collected_run_pytestfuncs
  442. ]
  443. # Match each generated pytest function with its runner and store them
  444. for run_pytestfunc in collected_run_pytestfuncs:
  445. runner = run_pytestfunc.function.marks["runner"]
  446. runner.run_pytestfunc = run_pytestfunc
  447. for result_pytestfunc in collected_result_pytestfuncs:
  448. runner = result_pytestfunc.function.marks["runner"]
  449. runner.result_pytestfuncs.append(result_pytestfunc)
  450. self.obj._runners = runners
  451. return collection
  452. @staticmethod
  453. def pytest_custom_makeitem(collector, name, obj):
  454. return EditorTestSuite.EditorTestClass(name, collector)
  455. @classmethod
  456. def pytest_custom_modify_items(cls, session: _pytest.main.Session, items: list[EditorTestBase],
  457. config: _pytest.config.Config) -> None:
  458. """
  459. Adds the runners' functions and filters the tests that will run. The runners will be added if they have any
  460. selected tests
  461. :param session: The Pytest Session
  462. :param items: The test case functions
  463. :param config: The Pytest Config object
  464. :return: None
  465. """
  466. new_items = []
  467. for runner in cls._runners:
  468. runner.tests[:] = cls.filter_session_shared_tests(items, runner.tests)
  469. if len(runner.tests) > 0:
  470. new_items.append(runner.run_pytestfunc)
  471. # Re-order dependent tests so they are run just after the runner
  472. for result_pytestfunc in runner.result_pytestfuncs:
  473. found_test = next((item for item in items if item == result_pytestfunc), None)
  474. if found_test:
  475. items.remove(found_test)
  476. new_items.append(found_test)
  477. items[:] = items + new_items
  478. @classmethod
  479. def get_single_tests(cls) -> list[EditorSingleTest]:
  480. """
  481. Grabs all of the EditorSingleTest subclasses from the EditorTestSuite class
  482. Usage example:
  483. class MyTestSuite(EditorTestSuite):
  484. class MyFirstTest(EditorSingleTest):
  485. from . import script_to_be_run_by_editor as test_module
  486. :return: The list of single tests
  487. """
  488. single_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSingleTest)]
  489. return single_tests
  490. @classmethod
  491. def get_shared_tests(cls) -> list[EditorSharedTest]:
  492. """
  493. Grabs all of the EditorSharedTest subclasses from the EditorTestSuite
  494. Usage example:
  495. class MyTestSuite(EditorTestSuite):
  496. class MyFirstTest(EditorSharedTest):
  497. from . import script_to_be_run_by_editor as test_module
  498. :return: The list of shared tests
  499. """
  500. shared_tests = [c[1] for c in cls.__dict__.items() if inspect.isclass(c[1]) and issubclass(c[1], EditorSharedTest)]
  501. return shared_tests
  502. @classmethod
  503. def get_session_shared_tests(cls, session: _pytest.main.Session) -> list[EditorTestBase]:
  504. """
  505. Filters and returns all of the shared tests in a given session.
  506. :session: The test session
  507. :return: The list of tests
  508. """
  509. shared_tests = cls.get_shared_tests()
  510. return cls.filter_session_shared_tests(session, shared_tests)
  511. @staticmethod
  512. def filter_session_shared_tests(session_items: list[_pytest.python.Function(EditorTestBase)], shared_tests: list[EditorSharedTest]) -> list[EditorTestBase]:
  513. """
  514. Retrieve the subset of tests that was collected; this can be smaller than the original set if tests were
  515. deselected via the -k argument or similar filters
  516. :session_items: The tests in a session to run
  517. :shared_tests: All of the shared tests
  518. :return: The list of filtered tests
  519. """
  520. def will_run(item):
  521. try:
  522. skipping_pytest_runtest_setup(item)
  523. return True
  524. except (Warning, Exception, _pytest.outcomes.OutcomeException) as ex:
  525. # intentionally broad to avoid events other than system interrupts
  526. warnings.warn(f"Test deselected from execution queue due to {ex}")
  527. return False
  528. session_items_by_name = {item.originalname: item for item in session_items}
  529. selected_shared_tests = [test for test in shared_tests if test.__name__ in session_items_by_name.keys() and
  530. will_run(session_items_by_name[test.__name__])]
  531. return selected_shared_tests
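# Hedged illustration (suite file and test name are hypothetical): running
#
#     pytest my_editor_tests.py -k MyTestInParallel_1
#
# leaves only the matching result function selected, so this helper trims each
# runner's shared-test list down to the tests that will actually report results.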
  532. @staticmethod
  533. def filter_shared_tests(shared_tests: list[EditorSharedTest], is_batchable: bool = False,
  534. is_parallelizable: bool = False) -> list[EditorSharedTest]:
  535. """
  536. Filters and returns all tests based on whether they are batchable and/or parallelizable
  537. :shared_tests: All shared tests
  538. :is_batchable: Filter to batchable tests
  539. :is_parallelizable: Filter to parallelizable tests
  540. :return: The list of filtered tests
  541. """
  542. return [
  543. t for t in shared_tests if (
  544. getattr(t, "is_batchable", None) is is_batchable
  545. and
  546. getattr(t, "is_parallelizable", None) is is_parallelizable
  547. )
  548. ]
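# Hedged illustration mirroring how collect() (above) slices the shared tests;
# 'shared' stands for the list returned by get_shared_tests():
#
#     batched_only  = EditorTestSuite.filter_shared_tests(shared, is_batchable=True)       # batchable, not parallelizable
#     parallel_only = EditorTestSuite.filter_shared_tests(shared, is_parallelizable=True)  # parallelizable, not batchable
#     both          = EditorTestSuite.filter_shared_tests(shared, is_batchable=True, is_parallelizable=True)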
  549. def _prepare_asset_processor(self, workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  550. editor_test_data: TestData) -> None:
  551. """
  552. Prepares the asset processor for the test depending on whether or not the process is open and if the current
  553. test owns it.
  554. :workspace: The workspace object in case an AssetProcessor object needs to be created
  555. :editor_test_data: The test data from calling editor_test_data()
  556. :return: None
  557. """
  558. try:
  559. # Start-up an asset processor if we are not running one
  560. # If another AP process exists, don't kill it, as we don't own it
  561. if editor_test_data.asset_processor is None:
  562. if not process_utils.process_exists("AssetProcessor", ignore_extensions=True):
  563. editor_utils.kill_all_ly_processes(include_asset_processor=True)
  564. editor_test_data.asset_processor = AssetProcessor(workspace)
  565. editor_test_data.asset_processor.start()
  566. else:
  567. editor_utils.kill_all_ly_processes(include_asset_processor=False)
  568. else:
  569. # Make sure the asset processor from before wasn't closed by accident
  570. editor_test_data.asset_processor.start()
  571. except Exception as ex:
  572. editor_test_data.asset_processor = None
  573. raise ex
  574. def _setup_editor_test(self, editor: ly_test_tools.launchers.platforms.base.Launcher,
  575. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  576. editor_test_data: TestData) -> None:
  577. """
  578. Sets up an editor test by preparing the Asset Processor, killing all other O3DE processes, and configuring the editor settings
  579. :editor: The launcher Editor object
  580. :workspace: The test Workspace object
  581. :editor_test_data: The TestData from calling editor_test_data()
  582. :return: None
  583. """
  584. self._prepare_asset_processor(workspace, editor_test_data)
  585. editor_utils.kill_all_ly_processes(include_asset_processor=False)
  586. editor.configure_settings()
  587. @staticmethod
  588. def _get_results_using_output(test_spec_list: list[EditorTestBase], output: str, editor_log_content: str) -> dict[str, Result]:
  589. """
  590. Utility function for parsing the output information from the editor. It deserializes the JSON content printed in
  591. the output for every test and returns that information.
  592. :test_spec_list: The list of EditorTests
  593. :output: The output from Editor.get_output()
  594. :editor_log_content: The contents of the editor log as a string
  595. :return: A dict of the tests and their respective Result objects
  596. """
  597. results = {}
  598. pattern = re.compile(r"JSON_START\((.+?)\)JSON_END")
  599. out_matches = pattern.finditer(output)
  600. found_jsons = {}
  601. for m in out_matches:
  602. try:
  603. elem = json.loads(m.groups()[0])
  604. found_jsons[elem["name"]] = elem
  605. except Exception: # Intentionally broad to avoid failing if the output data is corrupt
  606. continue
  607. # Try to find the element in the log, this is used for cutting the log contents later
  608. log_matches = pattern.finditer(editor_log_content)
  609. for m in log_matches:
  610. try:
  611. elem = json.loads(m.groups()[0])
  612. if elem["name"] in found_jsons:
  613. found_jsons[elem["name"]]["log_match"] = m
  614. except Exception: # Intentionally broad, to avoid failing if the log data is corrupt
  615. continue
  616. log_start = 0
  617. for test_spec in test_spec_list:
  618. name = editor_utils.get_module_filename(test_spec.test_module)
  619. if name not in found_jsons.keys():
  620. results[test_spec.__name__] = Result.Unknown(
  621. test_spec, output,
  622. f"Found no test run information on stdout for {name} in the editor log",
  623. editor_log_content)
  624. else:
  625. result = None
  626. json_result = found_jsons[name]
  627. json_output = json_result["output"]
  628. # Cut the editor log so it only has the output for this run
  629. if "log_match" in json_result:
  630. m = json_result["log_match"]
  631. end = m.end() if test_spec != test_spec_list[-1] else -1
  632. else:
  633. end = -1
  634. cur_log = editor_log_content[log_start: end]
  635. log_start = end
  636. if json_result["success"]:
  637. result = Result.Pass(test_spec, json_output, cur_log)
  638. else:
  639. result = Result.Fail(test_spec, json_output, cur_log)
  640. results[test_spec.__name__] = result
  641. return results
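# Hedged sketch of the stdout payload this parser expects; the field names are taken
# from the code above ("name", "output", "success"), everything else is a placeholder:
#
#     JSON_START({"name": "my_test_module", "output": "...", "success": true})JSON_END
#
# Each payload is matched back to its test_spec via
# editor_utils.get_module_filename(test_spec.test_module).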
  642. @staticmethod
  643. def _report_result(name: str, result: Result) -> None:
  644. """
  645. Fails the test if the test result is not a PASS, specifying the information
  646. :name: Name of the test
  647. :result: The Result object which denotes if the test passed or not
  648. :return: None
  649. """
  650. if isinstance(result, Result.Pass):
  651. output_str = f"Test {name}:\n{str(result)}"
  652. print(output_str)
  653. else:
  654. error_str = f"Test {name}:\n{str(result)}"
  655. pytest.fail(error_str)
  656. def _exec_editor_test(self, request: _pytest.fixtures.FixtureRequest,
  657. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  658. editor: ly_test_tools.launchers.platforms.base.Launcher,
  659. run_id: int, log_name: str, test_spec: EditorTestBase,
  660. cmdline_args: list[str] = None) -> dict[str, Result]:
  661. """
  662. Starts the editor with the given test and returns a result dict with a single element specifying the result
  663. :request: The pytest request
  664. :workspace: The LyTestTools Workspace object
  665. :editor: The LyTestTools Editor object
  666. :run_id: The unique run id
  667. :log_name: The name of the editor log to retrieve
  668. :test_spec: The type of EditorTestBase
  669. :cmdline_args: Any additional command line args
  670. :return: a dictionary of Result objects
  671. """
  672. if cmdline_args is None:
  673. cmdline_args = []
  674. test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
  675. test_spec_uses_null_renderer = getattr(test_spec, "use_null_renderer", None)
  676. if test_spec_uses_null_renderer or (test_spec_uses_null_renderer is None and self.use_null_renderer):
  677. test_cmdline_args += ["-rhi=null"]
  678. if test_spec.attach_debugger:
  679. test_cmdline_args += ["--attach-debugger"]
  680. if test_spec.wait_for_debugger:
  681. test_cmdline_args += ["--wait-for-debugger"]
  682. if self.enable_prefab_system:
  683. test_cmdline_args += [
  684. "--regset=/Amazon/Preferences/EnablePrefabSystem=true",
  685. f"--regset-file={os.path.join(workspace.paths.engine_root(), 'Registry', 'prefab.test.setreg')}"]
  686. else:
  687. test_cmdline_args += ["--regset=/Amazon/Preferences/EnablePrefabSystem=false"]
  688. # Cycle any old crash report in case it wasn't cycled properly
  689. editor_utils.cycle_crash_report(run_id, workspace)
  690. test_result = None
  691. results = {}
  692. test_filename = editor_utils.get_testcase_module_filepath(test_spec.test_module)
  693. cmdline = [
  694. "--runpythontest", test_filename,
  695. "-logfile", f"@log@/{log_name}",
  696. "-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
  697. editor.args.extend(cmdline)
  698. editor.start(backupFiles=False, launch_ap=False, configure_settings=False)
  699. try:
  700. editor.wait(test_spec.timeout)
  701. output = editor.get_output()
  702. return_code = editor.get_returncode()
  703. editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
  704. # Save the editor log
  705. workspace.artifact_manager.save_artifact(os.path.join(editor_utils.retrieve_log_path(run_id, workspace), log_name),
  706. f'({run_id}){log_name}')
  707. if return_code == 0:
  708. test_result = Result.Pass(test_spec, output, editor_log_content)
  709. else:
  710. has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
  711. if has_crashed:
  712. crash_output = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
  713. test_result = Result.Crash(test_spec, output, return_code, crash_output, None)
  714. # Save the crash log
  715. crash_file_name = os.path.basename(workspace.paths.crash_log())
  716. if os.path.exists(crash_file_name):
  717. workspace.artifact_manager.save_artifact(
  718. os.path.join(editor_utils.retrieve_log_path(run_id, workspace), crash_file_name))
  719. editor_utils.cycle_crash_report(run_id, workspace)
  720. else:
  721. logger.warning(f"Crash occurred, but could not find log {crash_file_name}")
  722. else:
  723. test_result = Result.Fail(test_spec, output, editor_log_content)
  724. except WaitTimeoutError:
  725. output = editor.get_output()
  726. editor.stop()
  727. editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
  728. test_result = Result.Timeout(test_spec, output, test_spec.timeout, editor_log_content)
  729. editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
  730. results = self._get_results_using_output([test_spec], output, editor_log_content)
  731. results[test_spec.__name__] = test_result
  732. return results
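# Hedged example of the editor invocation assembled above (launched through the
# LyTestTools launcher; the bracketed values are placeholders):
#
#     Editor --runpythontest <test_module>.py -logfile @log@/editor_test.log
#            -project-log-path <run log path> -BatchMode -autotest_mode -rhi=null
#            --regset=/Amazon/Preferences/EnablePrefabSystem=true
#            --regset-file=<engine_root>/Registry/prefab.test.setreg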
  733. def _exec_editor_multitest(self, request: _pytest.fixtures.FixtureRequest,
  734. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  735. editor: ly_test_tools.launchers.platforms.base.Launcher, run_id: int, log_name: str,
  736. test_spec_list: list[EditorTestBase],
  737. cmdline_args: list[str] = None) -> dict[str, Result]:
  738. """
  739. Starts an editor executable with a list of tests and returns a dict with the result of every test run within that
  740. editor instance. In case of failure this function also parses the editor output to find out which specific tests
  741. failed.
  742. :request: The pytest request
  743. :workspace: The LyTestTools Workspace object
  744. :editor: The LyTestTools Editor object
  745. :run_id: The unique run id
  746. :log_name: The name of the editor log to retrieve
  747. :test_spec_list: A list of EditorTestBase tests to run in the same editor instance
  748. :cmdline_args: Any additional command line args
  749. :return: A dict of Result objects
  750. """
  751. if cmdline_args is None:
  752. cmdline_args = []
  753. test_cmdline_args = self.global_extra_cmdline_args + cmdline_args
  754. if self.use_null_renderer:
  755. test_cmdline_args += ["-rhi=null"]
  756. if any([t.attach_debugger for t in test_spec_list]):
  757. test_cmdline_args += ["--attach-debugger"]
  758. if any([t.wait_for_debugger for t in test_spec_list]):
  759. test_cmdline_args += ["--wait-for-debugger"]
  760. if self.enable_prefab_system:
  761. test_cmdline_args += [
  762. "--regset=/Amazon/Preferences/EnablePrefabSystem=true",
  763. f"--regset-file={os.path.join(workspace.paths.engine_root(), 'Registry', 'prefab.test.setreg')}"]
  764. else:
  765. test_cmdline_args += ["--regset=/Amazon/Preferences/EnablePrefabSystem=false"]
  766. # Cycle any old crash report in case it wasn't cycled properly
  767. editor_utils.cycle_crash_report(run_id, workspace)
  768. results = {}
  769. test_filenames_str = ";".join(editor_utils.get_testcase_module_filepath(test_spec.test_module) for test_spec in test_spec_list)
  770. cmdline = [
  771. "--runpythontest", test_filenames_str,
  772. "-logfile", f"@log@/{log_name}",
  773. "-project-log-path", editor_utils.retrieve_log_path(run_id, workspace)] + test_cmdline_args
  774. editor.args.extend(cmdline)
  775. editor.start(backupFiles=False, launch_ap=False, configure_settings=False)
  776. output = ""
  777. editor_log_content = ""
  778. try:
  779. editor.wait(self.timeout_editor_shared_test)
  780. output = editor.get_output()
  781. return_code = editor.get_returncode()
  782. editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
  783. # Save the editor log
  784. workspace.artifact_manager.save_artifact(os.path.join(editor_utils.retrieve_log_path(run_id, workspace), log_name),
  785. f'({run_id}){log_name}')
  786. if return_code == 0:
  787. # No need to scrape the output, as all the tests have passed
  788. for test_spec in test_spec_list:
  789. results[test_spec.__name__] = Result.Pass(test_spec, output, editor_log_content)
  790. else:
  791. # Scrape the output to attempt to find out which tests failed.
  792. # This function should always populate the result list; tests it couldn't find will have an "Unknown" result
  793. results = self._get_results_using_output(test_spec_list, output, editor_log_content)
  794. assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results doesn't match the tests that ran"
  795. # If the editor crashed, find out in which test it happened and update the results
  796. has_crashed = return_code != EditorTestSuite._TEST_FAIL_RETCODE
  797. if has_crashed:
  798. crashed_result = None
  799. for test_spec_name, result in results.items():
  800. if isinstance(result, Result.Unknown):
  801. if not crashed_result:
  802. # The first test with "Unknown" result (no data in output) is likely the one that crashed
  803. crash_error = editor_utils.retrieve_crash_output(run_id, workspace,
  804. self._TIMEOUT_CRASH_LOG)
  805. # Save the crash log
  806. crash_file_name = os.path.basename(workspace.paths.crash_log())
  807. if os.path.exists(crash_file_name):
  808. workspace.artifact_manager.save_artifact(
  809. os.path.join(editor_utils.retrieve_log_path(run_id, workspace), crash_file_name))
  810. editor_utils.cycle_crash_report(run_id, workspace)
  811. else:
  812. logger.warning(f"Crash occurred, but could not find log {crash_file_name}")
  813. results[test_spec_name] = Result.Crash(result.test_spec, output, return_code,
  814. crash_error, result.editor_log)
  815. crashed_result = result
  816. else:
  817. # If there are remaining "Unknown" results, these couldn't execute because of the crash;
  818. # update them with info about the offender
  819. results[test_spec_name].extra_info = f"This test has an unknown result, " \
  820. f"test '{crashed_result.test_spec.__name__}' " \
  821. f"crashed before this test could be executed"
  822. # if all the tests ran, the one that has caused the crash is the last test
  823. if not crashed_result:
  824. crash_error = editor_utils.retrieve_crash_output(run_id, workspace, self._TIMEOUT_CRASH_LOG)
  825. editor_utils.cycle_crash_report(run_id, workspace)
  826. results[test_spec_name] = Result.Crash(result.test_spec, output, return_code,  # crashed_result is None in this branch, use the last iterated result
  827. crash_error, result.editor_log)
  828. except WaitTimeoutError:
  829. editor.stop()
  830. output = editor.get_output()
  831. editor_log_content = editor_utils.retrieve_editor_log_content(run_id, log_name, workspace)
  832. # The editor timed out when running the tests, get the data from the output to find out which ones ran
  833. results = self._get_results_using_output(test_spec_list, output, editor_log_content)
  834. assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results doesn't match the tests that ran"
  835. # Similar logic here as crashes, the first test that has no result is the one that timed out
  836. timed_out_result = None
  837. for test_spec_name, result in results.items():
  838. if isinstance(result, Result.Unknown):
  839. if not timed_out_result:
  840. results[test_spec_name] = Result.Timeout(result.test_spec, result.output,
  841. self.timeout_editor_shared_test,
  842. result.editor_log)
  843. timed_out_result = result
  844. else:
  845. # If there are remaining "Unknown" results, these couldn't execute because of the timeout;
  846. # update with info about the offender
  847. results[test_spec_name].extra_info = f"This test has unknown result, test " \
  848. f"'{timed_out_result.test_spec.__name__}' timed out " \
  849. f"before this test could be executed"
  850. # if all the tests ran, the one that has caused the timeout is the last test, as it didn't close the editor
  851. if not timed_out_result:
  852. results[test_spec_name] = Result.Timeout(result.test_spec,  # timed_out_result is None in this branch, use the last iterated result
  853. results[test_spec_name].output,
  854. self.timeout_editor_shared_test, result.editor_log)
  855. return results
  856. def _run_single_test(self, request: _pytest.fixtures.FixtureRequest,
  857. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  858. editor: ly_test_tools.launchers.platforms.base.Launcher,
  859. editor_test_data: TestData, test_spec: EditorSingleTest) -> None:
  860. """
  861. Runs a single test (one editor, one test) with the given specs
  862. :request: The Pytest Request
  863. :workspace: The LyTestTools Workspace object
  864. :editor: The LyTestTools Editor object
  865. :editor_test_data: The TestData from calling editor_test_data()
  866. :test_spec: The test class that should be a subclass of EditorSingleTest
  867. :return: None
  868. """
  869. self._setup_editor_test(editor, workspace, editor_test_data)
  870. extra_cmdline_args = []
  871. if hasattr(test_spec, "extra_cmdline_args"):
  872. extra_cmdline_args = test_spec.extra_cmdline_args
  873. result = self._exec_editor_test(request, workspace, editor, 1, "editor_test.log", test_spec, extra_cmdline_args)
  874. if result is None:
  875. logger.error(f"Unexpectedly found no test run in the editor log during {test_spec}")
  876. result = {"Unknown":
  877. Result.Unknown(
  878. test_spec=test_spec,
  879. extra_info="Unexpectedly found no test run information on stdout in the editor log")}
  880. editor_test_data.results.update(result)
  881. test_name, test_result = next(iter(result.items()))
  882. self._report_result(test_name, test_result)
  883. # If test did not pass, save assets with errors and warnings
  884. if not isinstance(test_result, Result.Pass):
  885. editor_utils.save_failed_asset_joblogs(workspace)
  886. def _run_batched_tests(self, request: _pytest.fixtures.FixtureRequest,
  887. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  888. editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
  889. test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
  890. """
  891. Runs a batch of tests in one single editor with the given spec list (one editor, multiple tests)
  892. :request: The Pytest Request
  893. :workspace: The LyTestTools Workspace object
  894. :editor: The LyTestTools Editor object
  895. :editor_test_data: The TestData from calling editor_test_data()
  896. :test_spec_list: A list of EditorSharedTest tests to run
  897. :extra_cmdline_args: Any extra command line args in a list
  898. :return: None
  899. """
  900. if extra_cmdline_args is None:
  901. extra_cmdline_args = []
  902. if not test_spec_list:
  903. return
  904. self._setup_editor_test(editor, workspace, editor_test_data)
  905. results = self._exec_editor_multitest(request, workspace, editor, 1, "editor_test.log", test_spec_list,
  906. extra_cmdline_args)
  907. editor_test_data.results.update(results)
  908. # If at least one test did not pass, save assets with errors and warnings
  909. for result in results.values():
  910. if result is None:
  911. logger.error("Unexpectedly found no test run in the editor log during EditorBatchedTest")
  912. logger.debug(f"Results from EditorBatchedTest:\n{results}")
  913. if not isinstance(result, Result.Pass):
  914. editor_utils.save_failed_asset_joblogs(workspace)
  915. return # exit early on first batch failure
  916. def _run_parallel_tests(self, request: _pytest.fixtures.FixtureRequest,
  917. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  918. editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
  919. test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
  920. """
  921. Runs multiple editors with one test on each editor (multiple editor, one test each)
  922. :request: The Pytest Request
  923. :workspace: The LyTestTools Workspace object
  924. :editor: The LyTestTools Editor object
  925. :editor_test_data: The TestData from calling editor_test_data()
  926. :test_spec_list: A list of EditorSharedTest tests to run
  927. :extra_cmdline_args: Any extra command line args in a list
  928. :return: None
  929. """
  930. if extra_cmdline_args is None:
  931. extra_cmdline_args = []
  932. if not test_spec_list:
  933. return
  934. self._setup_editor_test(editor, workspace, editor_test_data)
  935. parallel_editors = self._get_number_parallel_editors(request)
  936. assert parallel_editors > 0, "Must have at least one editor"
  937. # If there are more tests than max parallel editors, we will split them into multiple consecutive runs
  938. num_iterations = int(math.ceil(len(test_spec_list) / parallel_editors))
  939. for iteration in range(num_iterations):
  940. tests_for_iteration = test_spec_list[iteration*parallel_editors:(iteration+1)*parallel_editors]
  941. total_threads = len(tests_for_iteration)
  942. threads = []
  943. results_per_thread = [None] * total_threads
  944. for i in range(total_threads):
  945. def make_func(test_spec, index, my_editor):
  946. def run(request, workspace, extra_cmdline_args):
  947. results = self._exec_editor_test(request, workspace, my_editor, index+1, f"editor_test.log",
  948. test_spec, extra_cmdline_args)
  949. assert results is not None
  950. results_per_thread[index] = results
  951. return run
  952. # Duplicate the editor using the one coming from the fixture
  953. cur_editor = editor.__class__(workspace, editor.args.copy())
  954. f = make_func(tests_for_iteration[i], i, cur_editor)
  955. t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
  956. t.start()
  957. threads.append(t)
  958. for t in threads:
  959. t.join()
  960. save_asset_logs = False
  961. for result in results_per_thread:
  962. if result is None:
  963. logger.error("Unexpectedly found no test run in the editor log during EditorParallelTest")
  964. logger.debug(f"Results from EditorParallelTest thread:\n{results_per_thread}")
  965. result = {"Unknown":
  966. Result.Unknown(
  967. test_spec=EditorParallelTest,
  968. extra_info="Unexpectedly found no test run information on stdout in the editor log")}
  969. editor_test_data.results.update(result)
  970. if not all(isinstance(res, Result.Pass) for res in result.values()):  # 'result' is a dict of Result objects
  971. save_asset_logs = True
  972. # If at least one test did not pass, save assets with errors and warnings
  973. if save_asset_logs:
  974. editor_utils.save_failed_asset_joblogs(workspace)
  975. def _run_parallel_batched_tests(self, request: _pytest.fixtures.FixtureRequest,
  976. workspace: ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager,
  977. editor: ly_test_tools.launchers.platforms.base.Launcher, editor_test_data: TestData,
  978. test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = None) -> None:
  979. """
  980. Runs multiple editors with a batch of tests for each editor (multiple editor, multiple tests each)
  981. :request: The Pytest Request
  982. :workspace: The LyTestTools Workspace object
  983. :editor: The LyTestTools Editor object
  984. :editor_test_data: The TestData from calling editor_test_data()
  985. :test_spec_list: A list of EditorSharedTest tests to run
  986. :extra_cmdline_args: Any extra command line args in a list
  987. :return: None
  988. """
  989. if extra_cmdline_args is None:
  990. extra_cmdline_args = []
  991. if not test_spec_list:
  992. return
  993. self._setup_editor_test(editor, workspace, editor_test_data)
  994. total_threads = self._get_number_parallel_editors(request)
  995. assert total_threads > 0, "Must have at least one editor"
  996. threads = []
  997. tests_per_editor = int(math.ceil(len(test_spec_list) / total_threads))
  998. results_per_thread = [None] * total_threads
  999. for i in range(total_threads):
  1000. tests_for_thread = test_spec_list[i*tests_per_editor:(i+1)*tests_per_editor]
  1001. def make_func(test_spec_list_for_editor, index, my_editor):
  1002. def run(request, workspace, extra_cmdline_args):
  1003. results = None
  1004. if len(test_spec_list_for_editor) > 0:
  1005. results = self._exec_editor_multitest(request, workspace, my_editor, index+1,
  1006. f"editor_test.log", test_spec_list_for_editor,
  1007. extra_cmdline_args)
  1008. assert results is not None
  1009. else:
  1010. results = {}
  1011. results_per_thread[index] = results
  1012. return run
  1013. # Duplicate the editor using the one coming from the fixture
  1014. cur_editor = editor.__class__(workspace, editor.args.copy())
  1015. f = make_func(tests_for_thread, i, cur_editor)
  1016. t = threading.Thread(target=f, args=(request, workspace, extra_cmdline_args))
  1017. t.start()
  1018. threads.append(t)
  1019. for t in threads:
  1020. t.join()
  1021. save_asset_logs = False
  1022. for result in results_per_thread:
  1023. if result is None:
  1024. logger.error("Unexpectedly found no test run in the editor log during EditorSharedTest")
  1025. logger.debug(f"Results from EditorSharedTest thread:\n{results_per_thread}")
  1026. result = {"Unknown":
  1027. Result.Unknown(
  1028. test_spec=EditorSharedTest,
  1029. extra_info="Unexpectedly found no test run information on stdout in the editor log")}
  1030. editor_test_data.results.update(result)
  1031. if not all(isinstance(res, Result.Pass) for res in result.values()):  # 'result' is a dict of Result objects
  1032. save_asset_logs = True
  1033. # If at least one test did not pass, save assets with errors and warnings
  1034. if save_asset_logs:
  1035. editor_utils.save_failed_asset_joblogs(workspace)
  1036. def _get_number_parallel_editors(self, request: _pytest.fixtures.FixtureRequest) -> int:
  1037. """
  1038. Retrieves the number of parallel editors to run, preferring the --editors-parallel cmdline override when provided
  1039. :request: The Pytest Request
  1040. :return: The number of parallel editors to use
  1041. """
  1042. parallel_editors_value = request.config.getoption("--editors-parallel", None)
  1043. if parallel_editors_value:
  1044. return int(parallel_editors_value)
  1045. return self.get_number_parallel_editors()
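# Hedged examples of invoking a suite built on this module; the flag names are the
# options read in this file, the test path is a placeholder:
#
#     pytest path/to/my_editor_tests.py --editors-parallel 4
#     pytest path/to/my_editor_tests.py --no-editor-batch --no-editor-parallel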