Kaynağa Gözat

Added typed exceptions to LyTT. (#11694)

* Added typed exceptions to LyTT.
SSpalding 3 yıl önce
ebeveyn
işleme
dde34ebc71

+ 23 - 0
Tools/LyTestTools/ly_test_tools/_internal/exceptions.py

@@ -0,0 +1,23 @@
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.

SPDX-License-Identifier: Apache-2.0 OR MIT

Typed exceptions raised from inside the LyTestTools framework.
"""


class EditorToolsFrameworkException(Exception):
    """ Indicates that an Exception resulted from inside the Editor test tools portion of the framework """
    pass


class LyTestToolsFrameworkException(Exception):
    """ Indicates that an Exception resulted from inside the LyTestTools framework """
    pass


class TestResultException(Exception):
    """ Indicates that an unknown result was found during the tests """
    pass

+ 2 - 1
Tools/LyTestTools/ly_test_tools/_internal/managers/abstract_resource_locator.py

@@ -17,6 +17,7 @@ from weakref import KeyedRef
 
 
 import ly_test_tools._internal.pytest_plugin
 import ly_test_tools._internal.pytest_plugin
 import ly_test_tools.environment.file_system
 import ly_test_tools.environment.file_system
+import ly_test_tools._internal.exceptions as exceptions
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
@@ -42,7 +43,7 @@ def _find_engine_root(initial_path):
         else:  # explicit else avoids aberrant behavior from following filesystem links
         else:  # explicit else avoids aberrant behavior from following filesystem links
             current_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
             current_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
 
 
-    raise OSError(f"Unable to find engine root directory. Verify root file '{root_file}' exists")
+    raise exceptions.LyTestToolsFrameworkException(f"Unable to find engine root directory. Verify root file '{root_file}' exists")
 
 
 
 
 def _find_project_json(engine_root, project):
 def _find_project_json(engine_root, project):

+ 4 - 4
Tools/LyTestTools/ly_test_tools/mobile/android.py

@@ -14,7 +14,7 @@ import subprocess
 
 
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.waiter as waiter
 import ly_test_tools.environment.waiter as waiter
-
+import ly_test_tools._internal.exceptions as exceptions
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
@@ -56,7 +56,7 @@ def check_adb_connection_state():
     elif 'device' == output.strip():
     elif 'device' == output.strip():
         return SINGLE_DEVICE
         return SINGLE_DEVICE
     else:
     else:
-        raise RuntimeError("Detected unhandled output from adb get-state: {}".format(output.strip()))
+        raise exceptions.LyTestToolsFrameworkException("Detected unhandled output from adb get-state: {}".format(output.strip()))
 
 
 
 
 def reverse_tcp(device, host_port, device_port):
 def reverse_tcp(device, host_port, device_port):
@@ -149,7 +149,7 @@ def pull_files_to_pc(package_name, logs_path, device=None):
         if 'does not exist' in err.output:
         if 'does not exist' in err.output:
             logger.info('Could not pull logs since none exist on device {}'.format(device))
             logger.info('Could not pull logs since none exist on device {}'.format(device))
         else:
         else:
-            raise
+            raise exceptions.LyTestToolsFrameworkException from err
 
 
     logger.debug('Pull File Command Ran successfully: {}'.format(str(pull_cmd)))
     logger.debug('Pull File Command Ran successfully: {}'.format(str(pull_cmd)))
 
 
@@ -175,7 +175,7 @@ def push_files_to_device(source, destination, device=None):
     push_result = process_utils.check_output(cmd)
     push_result = process_utils.check_output(cmd)
     logger.debug('Push File Command Ran: {}'.format(str(cmd)))
     logger.debug('Push File Command Ran: {}'.format(str(cmd)))
     if 'pushed' not in push_result:
     if 'pushed' not in push_result:
-        raise RuntimeError('[AndroidLauncher] Failed to push file to device: {}!'.format(device))
+        raise exceptions.LyTestToolsFrameworkException('[AndroidLauncher] Failed to push file to device: {}!'.format(device))
 
 
 
 
 def start_adb_server():
 def start_adb_server():

+ 1 - 1
Tools/LyTestTools/ly_test_tools/o3de/ap_log_parser.py

@@ -248,7 +248,7 @@ class APLogParser(APOutputParser):
                 all_lines = log_file.readlines()
                 all_lines = log_file.readlines()
                 self._parse_lines(all_lines)
                 self._parse_lines(all_lines)
         except OSError:
         except OSError:
-            logger.error(f"Error opening file: {self._file_path}")
+            logger.error(f"Error opening file in LyTestTools at path: {self._file_path}")
             self._runs = []
             self._runs = []
 
 
     def _trim_line(self, line: str) -> Tuple[str, int]:
     def _trim_line(self, line: str) -> Tuple[str, int]:

+ 26 - 20
Tools/LyTestTools/ly_test_tools/o3de/asset_processor.py

@@ -24,6 +24,7 @@ import ly_test_tools
 import ly_test_tools.environment.file_system as file_system
 import ly_test_tools.environment.file_system as file_system
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.waiter as waiter
 import ly_test_tools.environment.waiter as waiter
+import ly_test_tools._internal.exceptions as exceptions
 import ly_test_tools.o3de.pipeline_utils as utils
 import ly_test_tools.o3de.pipeline_utils as utils
 from ly_test_tools.o3de.ap_log_parser import APLogParser
 from ly_test_tools.o3de.ap_log_parser import APLogParser
 
 
@@ -111,8 +112,10 @@ class AssetProcessor(object):
 
 
         self.send_message("waitforidle")
         self.send_message("waitforidle")
         result = self.read_message(read_timeout=timeout)
         result = self.read_message(read_timeout=timeout)
-        assert self.process_exists(), "Asset Processor has crashed or unexpectedly shut down during idle wait."
-        assert result == "idle", f"Did not get idle state from AP, message was instead: {result}"
+        if not self.process_exists():
+            raise exceptions.LyTestToolsFrameworkException("Asset Processor has crashed or unexpectedly shut down during idle wait.")
+        if not result == "idle":
+            raise exceptions.LyTestToolsFrameworkException(f"Did not get idle state from AP, message was instead: {result}")
         return True
         return True
 
 
     def next_idle(self):
     def next_idle(self):
@@ -129,8 +132,10 @@ class AssetProcessor(object):
 
 
         self.send_message("signalidle")
         self.send_message("signalidle")
         result = self.read_message()
         result = self.read_message()
-        assert self.process_exists(), "Asset Processor has crashed or unexpectedly shut down during idle request."
-        assert result == "idle", f"Did not get idle state from AP, message was instead: {result}"
+        if not self.process_exists():
+            raise exceptions.LyTestToolsFrameworkException("Asset Processor has crashed or unexpectedly shut down during idle request.")
+        if not result == "idle":
+            raise exceptions.LyTestToolsFrameworkException(f"Did not get idle state from AP, message was instead: {result}")
 
 
     def send_quit(self):
     def send_quit(self):
         """
         """
@@ -158,7 +163,7 @@ class AssetProcessor(object):
             logger.debug(f"Sent input {message}")
             logger.debug(f"Sent input {message}")
             return True
             return True
         except IOError as e:
         except IOError as e:
-            logger.warning(f"Failed to send message {message} to AP with error {e}")
+            logger.warning(f"Failed in LyTestTools to send message {message} to AP with error {e}")
         return False
         return False
 
 
     def read_message(self, read_timeout=DEFAULT_TIMEOUT_SECONDS):
     def read_message(self, read_timeout=DEFAULT_TIMEOUT_SECONDS):
@@ -182,7 +187,7 @@ class AssetProcessor(object):
             logger.debug(f"Got result message {result_message}")
             logger.debug(f"Got result message {result_message}")
             return result_message
             return result_message
         except IOError as e:
         except IOError as e:
-            logger.warning(f"Failed to read message from with error {e}")
+            logger.warning(f"Failed in LyTestTools to read message from with error {e}")
             return f"error_{e}"
             return f"error_{e}"
 
 
     def read_control_port(self):
     def read_control_port(self):
@@ -209,13 +214,13 @@ class AssetProcessor(object):
                     logger.debug(f"Read port type {port_type} : {port}")
                     logger.debug(f"Read port type {port_type} : {port}")
                     return True
                     return True
                 except Exception as ex:  # intentionally broad
                 except Exception as ex:  # intentionally broad
-                    logger.debug(f"Failed to read port type {port_type} : {port} from file", exc_info=ex)
+                    logger.debug(f"Failed in LyTestTools to read port type {port_type} : {port} from file", exc_info=ex)
             return False
             return False
 
 
         # the timeout needs to be large enough to load all the dynamic libraries the AP-GUI loads since the control port
         # the timeout needs to be large enough to load all the dynamic libraries the AP-GUI loads since the control port
         # is opened after all the DLL loads, this can take a long time in a Debug build
         # is opened after all the DLL loads, this can take a long time in a Debug build
         ap_max_activate_time = 60
         ap_max_activate_time = 60
-        err = AssetProcessorError(f"Failed to read port type {port_type} from {self._workspace.paths.ap_gui_log()}. "
+        err = AssetProcessorError(f"Failed in LyTestTools to read port type {port_type} from {self._workspace.paths.ap_gui_log()}. "
                                   f"Waited for {ap_max_activate_time} seconds.")
                                   f"Waited for {ap_max_activate_time} seconds.")
         waiter.wait_for(_get_port_from_log, timeout=ap_max_activate_time, exc=err)
         waiter.wait_for(_get_port_from_log, timeout=ap_max_activate_time, exc=err)
         return port
         return port
@@ -274,7 +279,7 @@ class AssetProcessor(object):
                             logger.debug(f"Found new connect port for {port_name}: {host}:{new_connect_port}")
                             logger.debug(f"Found new connect port for {port_name}: {host}:{new_connect_port}")
                             connect_port = new_connect_port
                             connect_port = new_connect_port
                     except Exception as read_exception:  # Purposefully broad
                     except Exception as read_exception:  # Purposefully broad
-                        logger.debug(f"Failed to read port data for {port_name}: {host}:{new_connect_port}",
+                        logger.debug(f"Failed in LyTestTools to read port data for {port_name}: {host}:{new_connect_port}",
                                        exc_info=read_exception)
                                        exc_info=read_exception)
             return False
             return False
 
 
@@ -306,7 +311,7 @@ class AssetProcessor(object):
                 self.terminate()
                 self.terminate()
                 return StopReason.NO_QUIT
                 return StopReason.NO_QUIT
         except IOError as e:
         except IOError as e:
-            logger.warning(f"Failed to send quit request with error {e}, stopping")
+            logger.warning(f"Failed in LyTestTools to send quit request with error {e}, stopping")
             self.terminate()
             self.terminate()
             return StopReason.IO_ERROR
             return StopReason.IO_ERROR
 
 
@@ -314,12 +319,12 @@ class AssetProcessor(object):
         try:
         try:
             waiter.wait_for(lambda: not self.process_exists(), exc=AssetProcessorError, timeout=wait_timeout)
             waiter.wait_for(lambda: not self.process_exists(), exc=AssetProcessorError, timeout=wait_timeout)
         except AssetProcessorError:
         except AssetProcessorError:
-            logger.warning(f"Timeout attempting to quit asset processor after {wait_timeout} seconds, using terminate")
+            logger.warning(f"Timeout in LyTestTools attempting to quit asset processor after {wait_timeout} seconds, using terminate")
             self.terminate()
             self.terminate()
             return StopReason.TIMEOUT
             return StopReason.TIMEOUT
 
 
         if self.process_exists():
         if self.process_exists():
-            logger.warning(f"Failed to stop process {self.get_pid()} after {wait_timeout} seconds, using terminate")
+            logger.warning(f"Failed in LyTestTools to stop process {self.get_pid()} after {wait_timeout} seconds, using terminate")
             self.terminate()
             self.terminate()
             return StopReason.NO_STOP
             return StopReason.NO_STOP
 
 
@@ -514,7 +519,7 @@ class AssetProcessor(object):
             self._ap_proc = subprocess.Popen(command, cwd=ap_exe_path, env=process_utils.get_display_env())
             self._ap_proc = subprocess.Popen(command, cwd=ap_exe_path, env=process_utils.get_display_env())
             time.sleep(1)
             time.sleep(1)
             if self._ap_proc.poll() is not None:
             if self._ap_proc.poll() is not None:
-                raise AssetProcessorError(f"AssetProcessor immediately quit with errorcode {self._ap_proc.returncode}")
+                raise AssetProcessorError(f"AssetProcessor immediately quit with errorcode {self._ap_proc.returncode} in LyTestTools ")
 
 
             if accept_input:
             if accept_input:
                 self.connect_control()
                 self.connect_control()
@@ -524,7 +529,7 @@ class AssetProcessor(object):
 
 
             if quitonidle:
             if quitonidle:
                 waiter.wait_for(lambda: not self.process_exists(), timeout=timeout,
                 waiter.wait_for(lambda: not self.process_exists(), timeout=timeout,
-                                exc=AssetProcessorError(f"Failed to quit on idle within {timeout} seconds"))
+                                exc=AssetProcessorError(f"Failed in LyTestTools to quit on idle within {timeout} seconds"))
             elif run_until_idle and accept_input:
             elif run_until_idle and accept_input:
                 if not self.wait_for_idle():
                 if not self.wait_for_idle():
                     return False, None
                     return False, None
@@ -536,8 +541,8 @@ class AssetProcessor(object):
                 if self._ap_proc:
                 if self._ap_proc:
                     self._ap_proc.kill()
                     self._ap_proc.kill()
             except Exception as ex:
             except Exception as ex:
-                logger.exception("Ignoring exception while trying to terminate Asset Processor", ex)
-            raise be  # raise whatever prompted us to clean up
+                logger.exception("Ignoring exception while trying to terminate Asset Processor from LyTestTools ", ex)
+            raise exceptions.LyTestToolsFrameworkException from be  # raise whatever prompted us to clean up
 
 
     def connect_listen(self, timeout=DEFAULT_TIMEOUT_SECONDS):
     def connect_listen(self, timeout=DEFAULT_TIMEOUT_SECONDS):
         # Wait for the AP we launched to be ready to accept a connection
         # Wait for the AP we launched to be ready to accept a connection
@@ -720,7 +725,7 @@ class AssetProcessor(object):
         try:
         try:
             os.remove(name)
             os.remove(name)
         except OSError as e:
         except OSError as e:
-            logger.error(f'Failed to clean up {name} : {e}')
+            logger.error(f'In LyTestTools Failed to clean up {name} : {e}')
 
 
     def delete_temp_asset_root_folder(self):
     def delete_temp_asset_root_folder(self):
         """
         """
@@ -1046,15 +1051,16 @@ def assetprocessorbatch_check_output(workspace, project=None, platforms=None, ex
         return output_list
         return output_list
     except subprocess.CalledProcessError as e:
     except subprocess.CalledProcessError as e:
         if not expect_failure:
         if not expect_failure:
-            logger.error(f"AssetProcessorBatch returned error {ap_path} with error {e}")
+            logger.error(f"AssetProcessorBatch returned error {ap_path} to LyTestTools with error {e}")
         # This will sometimes be due to expected asset processing errors - we'll return the output and let the tests
         # This will sometimes be due to expected asset processing errors - we'll return the output and let the tests
         # decide what to do
         # decide what to do
         if not no_split:
         if not no_split:
             return e.output.decode('utf-8').split("\r\n")
             return e.output.decode('utf-8').split("\r\n")
         return e.output.decode('utf-8')
         return e.output.decode('utf-8')
     except FileNotFoundError as e:
     except FileNotFoundError as e:
-        logger.error(f"File Not Found - Failed to call {ap_path} with error {e}")
-    assert not expect_failure, "AP failure didn't occur as expected"
+        logger.error(f"File Not Found - Failed to call {ap_path} from LyTestTools with error {e}")
+    if expect_failure:
+        raise exceptions.LyTestToolsFrameworkException("AP failure didn't occur as expected")
 
 
 
 
 def parse_output_value(output_list, start_value, end_value=None):
 def parse_output_value(output_list, start_value, end_value=None):

+ 2 - 1
Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py

@@ -14,6 +14,7 @@ import time
 
 
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.waiter as waiter
 import ly_test_tools.environment.waiter as waiter
+from ly_test_tools._internal.exceptions import EditorToolsFrameworkException
 from ly_test_tools.o3de.asset_processor import AssetProcessor
 from ly_test_tools.o3de.asset_processor import AssetProcessor
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
@@ -349,4 +350,4 @@ def prepare_asset_processor(workspace: ly_test_tools._internal.managers.workspac
             collected_test_data.asset_processor.start()
             collected_test_data.asset_processor.start()
     except Exception as ex:
     except Exception as ex:
         collected_test_data.asset_processor = None
         collected_test_data.asset_processor = None
-        raise ex
+        raise EditorToolsFrameworkException from ex

+ 23 - 15
Tools/LyTestTools/ly_test_tools/o3de/multi_test_framework.py

@@ -39,6 +39,7 @@ from _pytest.skipping import pytest_runtest_setup as skip_pytest_runtest_setup
 
 
 import ly_test_tools.o3de.editor_test_utils as editor_utils
 import ly_test_tools.o3de.editor_test_utils as editor_utils
 from ly_test_tools._internal.managers.workspace import AbstractWorkspaceManager
 from ly_test_tools._internal.managers.workspace import AbstractWorkspaceManager
+from ly_test_tools._internal.exceptions import EditorToolsFrameworkException, TestResultException
 from ly_test_tools.launchers import launcher_helper
 from ly_test_tools.launchers import launcher_helper
 from ly_test_tools.launchers.exceptions import WaitTimeoutError
 from ly_test_tools.launchers.exceptions import WaitTimeoutError
 from ly_test_tools.launchers.platforms.linux.launcher import LinuxEditor, LinuxMaterialEditor
 from ly_test_tools.launchers.platforms.linux.launcher import LinuxEditor, LinuxMaterialEditor
@@ -121,11 +122,6 @@ class BatchedTest(SharedTest):
     is_parallelizable = False
     is_parallelizable = False
 
 
 
 
-class TestResultException(Exception):
-    """Indicates that an unknown result was found during the tests"""
-    pass
-
-
 class Result(object):
 class Result(object):
     """Holds test results for a given program/application."""
     """Holds test results for a given program/application."""
 
 
@@ -425,8 +421,8 @@ class MultiTestSuite(object):
                             # Setup step for wrap_run
                             # Setup step for wrap_run
                             wrap = inner_test_spec.wrap_run(
                             wrap = inner_test_spec.wrap_run(
                                 self, request, workspace, collected_test_data)
                                 self, request, workspace, collected_test_data)
-                            assert isinstance(wrap, types.GeneratorType), (
-                                "wrap_run must return a generator, did you forget 'yield'?")
+                            if not isinstance(wrap, types.GeneratorType):
+                                raise EditorToolsFrameworkException("wrap_run must return a generator, did you forget 'yield'?")
                             next(wrap, None)
                             next(wrap, None)
                             # Setup step
                             # Setup step
                             inner_test_spec.setup(
                             inner_test_spec.setup(
@@ -922,7 +918,9 @@ class MultiTestSuite(object):
             return
             return
 
 
         parallel_executables = self._get_number_parallel_executables(request)
         parallel_executables = self._get_number_parallel_executables(request)
-        assert parallel_executables > 0, "Must have at least one executable"
+        if not parallel_executables > 0:
+            logger.warning("Expected 1 or more parallel_executables, found 0. Setting to 1.")
+            parallel_executables = 1
 
 
         # If there are more tests than max parallel executables, we will split them into multiple consecutive runs.
         # If there are more tests than max parallel executables, we will split them into multiple consecutive runs.
         num_iterations = int(math.ceil(len(test_spec_list) / parallel_executables))
         num_iterations = int(math.ceil(len(test_spec_list) / parallel_executables))
@@ -936,7 +934,9 @@ class MultiTestSuite(object):
                     def run(request, workspace, extra_cmdline_args):
                     def run(request, workspace, extra_cmdline_args):
                         results = self._exec_single_test(
                         results = self._exec_single_test(
                             request, workspace, current_executable, index + 1, self._log_name, test_spec, extra_cmdline_args)
                             request, workspace, current_executable, index + 1, self._log_name, test_spec, extra_cmdline_args)
-                        assert results is not None
+                        if results is None:
+                            raise EditorToolsFrameworkException(f"Results were None. Current log name is "
+                                                                f"{self._log_name} and test is {str(test_spec)}")
                         results_per_thread[index] = results
                         results_per_thread[index] = results
                     return run
                     return run
 
 
@@ -1004,7 +1004,10 @@ class MultiTestSuite(object):
             return
             return
 
 
         total_threads = self._get_number_parallel_executables(request)
         total_threads = self._get_number_parallel_executables(request)
-        assert total_threads > 0, "Must have at least one executable"
+        if not total_threads > 0:
+            logger.warning("Expected 1 or more total_threads, found 0. Setting to 1.")
+            total_threads = 1
+
         threads = []
         threads = []
         tests_per_executable = int(math.ceil(len(test_spec_list) / total_threads))
         tests_per_executable = int(math.ceil(len(test_spec_list) / total_threads))
         results_per_thread = [None] * total_threads
         results_per_thread = [None] * total_threads
@@ -1018,7 +1021,10 @@ class MultiTestSuite(object):
                         results = self._exec_multitest(
                         results = self._exec_multitest(
                             request, workspace, current_executable, index + 1, self._log_name,
                             request, workspace, current_executable, index + 1, self._log_name,
                             test_spec_list_for_executable, extra_cmdline_args)
                             test_spec_list_for_executable, extra_cmdline_args)
-                        assert results is not None
+                        if results is None:
+                            raise EditorToolsFrameworkException(f"Results were None. Current log name is "
+                                                                f"{self._log_name} and tests are "
+                                                                f"{str(test_spec_list_for_executable)}")
                     else:
                     else:
                         results = {}
                         results = {}
                     results_per_thread[index] = results
                     results_per_thread[index] = results
@@ -1172,8 +1178,9 @@ class MultiTestSuite(object):
                 # This function should always populate the result list.
                 # This function should always populate the result list.
                 # If it didn't then it will have "Unknown" as the type of result.
                 # If it didn't then it will have "Unknown" as the type of result.
                 results = self._get_results_using_output(test_spec_list, output, executable_log_content)
                 results = self._get_results_using_output(test_spec_list, output, executable_log_content)
-                assert len(results) == len(test_spec_list), (
-                    "bug in get_results_using_output(), the number of results don't match the tests ran")
+                if not len(results) == len(test_spec_list):
+                    raise EditorToolsFrameworkException("bug in get_results_using_output(), the number of results "
+                                                        "don't match the tests ran")
 
 
                 # If the executable crashed, find out in which test it happened and update the results.
                 # If the executable crashed, find out in which test it happened and update the results.
                 has_crashed = return_code != self._test_fail_retcode
                 has_crashed = return_code != self._test_fail_retcode
@@ -1220,8 +1227,9 @@ class MultiTestSuite(object):
 
 
             # The executable timed out when running the tests, get the data from the output to find out which ones ran
             # The executable timed out when running the tests, get the data from the output to find out which ones ran
             results = self._get_results_using_output(test_spec_list, output, executable_log_content)
             results = self._get_results_using_output(test_spec_list, output, executable_log_content)
-            assert len(results) == len(test_spec_list), (
-                "bug in _get_results_using_output(), the number of results don't match the tests ran")
+            if not len(results) == len(test_spec_list):
+                raise EditorToolsFrameworkException("bug in _get_results_using_output(), the number of results "
+                                                    "don't match the tests ran")
 
 
             # Similar logic here as crashes, the first test that has no result is the one that timed out
             # Similar logic here as crashes, the first test that has no result is the one that timed out
             timed_out_result = None
             timed_out_result = None

+ 10 - 6
Tools/LyTestTools/ly_test_tools/o3de/pipeline_utils.py

@@ -25,6 +25,7 @@ from typing import Dict, List, Tuple, Optional, Callable
 # Import LyTestTools
 # Import LyTestTools
 import ly_test_tools.environment.file_system as fs
 import ly_test_tools.environment.file_system as fs
 import ly_test_tools.environment.process_utils as process_utils
 import ly_test_tools.environment.process_utils as process_utils
+import ly_test_tools._internal.exceptions as exceptions
 from ly_test_tools.o3de.ap_log_parser import APLogParser
 from ly_test_tools.o3de.ap_log_parser import APLogParser
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
@@ -165,7 +166,7 @@ def get_files_hashsum(path_to_files_dir: str) -> Dict[str, int]:
             with open(os.path.join(path_to_files_dir, fname), "rb") as fopen:
             with open(os.path.join(path_to_files_dir, fname), "rb") as fopen:
                 checksum_dict[fname] = hashlib.sha256(fopen.read()).digest()
                 checksum_dict[fname] = hashlib.sha256(fopen.read()).digest()
     except IOError:
     except IOError:
-        logger.error("An error occurred trying to read file")
+        logger.error("An error occurred in LyTestTools when trying to read file.")
     return checksum_dict
     return checksum_dict
 
 
 
 
@@ -251,7 +252,7 @@ def safe_subprocess(command: str or List[str], **kwargs: Dict) -> ProcessOutput:
         # Set object flag
         # Set object flag
         subprocess_output.exception_occurred = True
         subprocess_output.exception_occurred = True
         # If error occurs when **kwargs includes check=True Exceptions are possible
         # If error occurs when **kwargs includes check=True Exceptions are possible
-        logger.warning(f'Command "{cmd_string}" failed with returncode {e.returncode}, output:\n{e.output}')
+        logger.warning(f'Command "{cmd_string}" failed in LyTestTools with returncode {e.returncode}, output:\n{e.output}')
         # Read and process error outputs
         # Read and process error outputs
         subprocess_output.stderr = e.output.read().decode()
         subprocess_output.stderr = e.output.read().decode()
         # Save error return code
         # Save error return code
@@ -275,7 +276,7 @@ def processes_with_substring_in_name(substring: str) -> tuple:
             if substring.lower() in p.name().lower():
             if substring.lower() in p.name().lower():
                 targeted_processes.append(p)
                 targeted_processes.append(p)
         except psutil.NoSuchProcess as e:
         except psutil.NoSuchProcess as e:
-            logger.info(f"Process {p} was killed during processes_with_substring_in_name()!\nError: {e}")
+            logger.info(f"Process {p} was killed in LyTestTools during processes_with_substring_in_name()!\nError: {e}")
             continue
             continue
     return tuple(targeted_processes)
     return tuple(targeted_processes)
 
 
@@ -305,7 +306,8 @@ def process_cpu_usage_below(process_name: str, cpu_usage_threshold: float) -> bo
     """
     """
     # Get all instances of targeted process
     # Get all instances of targeted process
     targeted_processes = processes_with_substring_in_name(process_name)
     targeted_processes = processes_with_substring_in_name(process_name)
-    assert len(targeted_processes) > 0, f"No instances of {process_name} were found"
+    if not len(targeted_processes) > 0:
+        raise exceptions.LyTestToolsFrameworkException(f"No instances of {process_name} were found")
 
 
     # Return whether all instances of targeted process are idle
     # Return whether all instances of targeted process are idle
     for targeted_process in targeted_processes:
     for targeted_process in targeted_processes:
@@ -542,7 +544,7 @@ def check_for_perforce():
     try:
     try:
         p4_output = subprocess.check_output(command_list).decode('utf-8')
         p4_output = subprocess.check_output(command_list).decode('utf-8')
     except subprocess.CalledProcessError as e:
     except subprocess.CalledProcessError as e:
-        logger.error(f"Failed to call {command_list} with error {e}")
+        logger.error(f"Failed to call {command_list} in LyTestTools with error {e}")
         return False
         return False
 
 
     if not p4_output.startswith("User name:"):
     if not p4_output.startswith("User name:"):
@@ -567,7 +569,9 @@ def check_for_perforce():
 
 
 
 
 def get_file_hash(filePath, hashBufferSize = 65536):
 def get_file_hash(filePath, hashBufferSize = 65536):
-    assert os.path.exists(filePath), f"Cannot get file hash, file at path '{filePath}' does not exist."
+    if not os.path.exists(filePath):
+        raise exceptions.LyTestToolsFrameworkException(f"Cannot get file hash, file at path '{filePath}' does not exist.")
+
     sha1 = hashlib.sha1()
     sha1 = hashlib.sha1()
     with open(filePath, 'rb') as cacheFile:
     with open(filePath, 'rb') as cacheFile:
         while True:
         while True:

+ 4 - 2
Tools/LyTestTools/ly_test_tools/o3de/settings.py

@@ -14,6 +14,7 @@ import re
 import os
 import os
 
 
 import ly_test_tools.environment.file_system
 import ly_test_tools.environment.file_system
+import ly_test_tools._internal.exceptions as exceptions
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
@@ -246,7 +247,7 @@ def _edit_text_settings_file(settings_file, setting, value, comment_char=""):
     """
     """
 
 
     if not os.path.isfile(settings_file):
     if not os.path.isfile(settings_file):
-        raise IOError(f"Invalid file and/or path {settings_file}.")
+        raise exceptions.LyTestToolsFrameworkException(f"Invalid file and/or path {settings_file}.")
 
 
     match_obj = None
     match_obj = None
     document = None
     document = None
@@ -277,7 +278,8 @@ def _edit_text_settings_file(settings_file, setting, value, comment_char=""):
                 print(line)
                 print(line)
 
 
     except PermissionError as error:
     except PermissionError as error:
-        logger.warning(f"PermissionError, possibly due to ({settings_file}) already being open. Error: {error}")
+        logger.warning(f"PermissionError originating from LyTT, possibly due to ({settings_file}) already being open. "
+                       f"Error: {error}")
     finally:
     finally:
         if document is not None:
         if document is not None:
             document.close()
             document.close()

+ 3 - 1
Tools/LyTestTools/tests/unit/test_abstract_resource_locator.py

@@ -13,6 +13,8 @@ import pytest
 
 
 import ly_test_tools._internal.managers.abstract_resource_locator as abstract_resource_locator
 import ly_test_tools._internal.managers.abstract_resource_locator as abstract_resource_locator
 
 
+from ly_test_tools._internal.exceptions import LyTestToolsFrameworkException
+
 pytestmark = pytest.mark.SUITE_smoke
 pytestmark = pytest.mark.SUITE_smoke
 
 
 mock_initial_path = "mock_initial_path"
 mock_initial_path = "mock_initial_path"
@@ -43,7 +45,7 @@ class TestFindEngineRoot(object):
         mock_path_exists.return_value = False
         mock_path_exists.return_value = False
         mock_abspath.return_value = mock_engine_root
         mock_abspath.return_value = mock_engine_root
 
 
-        with pytest.raises(OSError):
+        with pytest.raises(LyTestToolsFrameworkException):
             abstract_resource_locator._find_engine_root(mock_initial_path)
             abstract_resource_locator._find_engine_root(mock_initial_path)
 
 
 
 

+ 2 - 1
Tools/LyTestTools/tests/unit/test_builtin_helpers.py

@@ -16,6 +16,7 @@ import ly_test_tools._internal.managers.workspace
 import ly_test_tools._internal.managers.platforms.mac
 import ly_test_tools._internal.managers.platforms.mac
 import ly_test_tools._internal.managers.platforms.windows
 import ly_test_tools._internal.managers.platforms.windows
 
 
+from ly_test_tools._internal.exceptions import LyTestToolsFrameworkException
 from ly_test_tools import MAC, WINDOWS
 from ly_test_tools import MAC, WINDOWS
 
 
 pytestmark = pytest.mark.SUITE_smoke
 pytestmark = pytest.mark.SUITE_smoke
@@ -107,7 +108,7 @@ class TestBuiltinHelpers(object):
     @mock.patch('os.path.abspath', mock.MagicMock(return_value='mock_base_dir'))
     @mock.patch('os.path.abspath', mock.MagicMock(return_value='mock_base_dir'))
     @mock.patch('os.path.exists', mock.MagicMock(return_value=False))
     @mock.patch('os.path.exists', mock.MagicMock(return_value=False))
     def test_FindEngineRoot_NoRootFile_RaisesOSError(self):
     def test_FindEngineRoot_NoRootFile_RaisesOSError(self):
-        with pytest.raises(OSError):
+        with pytest.raises(LyTestToolsFrameworkException):
             ly_test_tools._internal.managers.abstract_resource_locator._find_engine_root(
             ly_test_tools._internal.managers.abstract_resource_locator._find_engine_root(
                 initial_path='mock_dev_dir')
                 initial_path='mock_dev_dir')